From 0463124499f4077185afc2b0b518fa0692aecb03 Mon Sep 17 00:00:00 2001 From: Roger Shimizu Date: Tue, 3 May 2022 17:51:47 +0100 Subject: [PATCH] Import android-platform-art_11.0.0+r48.orig.tar.xz [dgit import orig android-platform-art_11.0.0+r48.orig.tar.xz] --- .gitignore | 6 + Android.bp | 5 + Android.mk | 872 ++ CPPLINT.cfg | 33 + CleanSpec.mk | 110 + MODULE_LICENSE_APACHE2 | 0 NOTICE | 190 + OWNERS | 3 + PREUPLOAD.cfg | 11 + TEST_MAPPING | 10 + adbconnection/Android.bp | 76 + adbconnection/adbconnection.cc | 865 ++ adbconnection/adbconnection.h | 185 + benchmark/Android.bp | 60 + benchmark/const-class/info.txt | 1 + .../const-class/src/ConstClassBenchmark.java | 1071 ++ benchmark/const-string/info.txt | 1 + .../src/ConstStringBenchmark.java | 1067 ++ benchmark/jni-perf/info.txt | 1 + benchmark/jni-perf/perf_jni.cc | 39 + benchmark/jni-perf/src/JniPerfBenchmark.java | 52 + benchmark/jni_loader.cc | 32 + benchmark/jobject-benchmark/info.txt | 7 + .../jobject-benchmark/jobject_benchmark.cc | 104 + .../src/JObjectBenchmark.java | 37 + benchmark/micro-native/micro_native.cc | 146 + benchmark/scoped-primitive-array/info.txt | 1 + .../scoped_primitive_array.cc | 58 + .../src/ScopedPrimitiveArrayBenchmark.java | 91 + benchmark/string-indexof/info.txt | 1 + .../src/StringIndexOfBenchmark.java | 122 + benchmark/stringbuilder-append/info.txt | 1 + .../src/StringBuilderAppendBenchmark.java | 62 + benchmark/type-check/info.txt | 1 + .../type-check/src/TypeCheckBenchmark.java | 147 + build/Android.bp | 250 + build/Android.common.mk | 99 + build/Android.common_build.mk | 83 + build/Android.common_path.mk | 162 + build/Android.common_test.mk | 154 + build/Android.cpplint.mk | 72 + build/Android.gtest.mk | 746 ++ build/Android.oat.mk | 271 + build/apex/Android.bp | 459 + build/apex/art_apex_boot_integrity.rc | 21 + build/apex/art_apex_boot_integrity.sh | 55 + build/apex/art_apex_test.py | 1305 +++ build/apex/com.android.art.avbpubkey | Bin 0 -> 1032 bytes 
build/apex/com.android.art.pem | 51 + build/apex/com.android.art.pk8 | Bin 0 -> 2376 bytes build/apex/com.android.art.x509.pem | 35 + build/apex/ld.config.txt | 179 + build/apex/manifest-art.json | 4 + build/apex/runtests.sh | 202 + build/art.go | 433 + build/codegen.go | 218 + build/makevars.go | 47 + build/sdk/Android.bp | 53 + cmdline/Android.bp | 35 + cmdline/README.md | 245 + cmdline/cmdline.h | 422 + cmdline/cmdline_parse_result.h | 138 + cmdline/cmdline_parser.h | 633 ++ cmdline/cmdline_parser_test.cc | 593 + cmdline/cmdline_result.h | 102 + cmdline/cmdline_type_parser.h | 76 + cmdline/cmdline_types.h | 774 ++ cmdline/detail/cmdline_debug_detail.h | 40 + .../detail/cmdline_parse_argument_detail.h | 505 + cmdline/detail/cmdline_parser_detail.h | 128 + cmdline/memory_representation.h | 70 + cmdline/token_range.h | 427 + cmdline/unit.h | 37 + compiler/Android.bp | 490 + compiler/cfi_test.h | 156 + compiler/common_compiler_test.cc | 251 + compiler/common_compiler_test.h | 102 + compiler/compiled_method-inl.h | 55 + compiler/compiled_method.cc | 141 + compiler/compiled_method.h | 167 + compiler/compiler.cc | 71 + compiler/compiler.h | 127 + compiler/debug/debug_info.h | 46 + compiler/debug/dwarf/dwarf_test.cc | 333 + compiler/debug/dwarf/dwarf_test.h | 174 + compiler/debug/elf_compilation_unit.h | 39 + compiler/debug/elf_debug_frame_writer.h | 237 + compiler/debug/elf_debug_info_writer.h | 678 ++ compiler/debug/elf_debug_line_writer.h | 281 + compiler/debug/elf_debug_loc_writer.h | 331 + compiler/debug/elf_debug_writer.cc | 381 + compiler/debug/elf_debug_writer.h | 74 + compiler/debug/elf_symtab_writer.h | 123 + compiler/debug/method_debug_info.h | 51 + compiler/debug/src_map_elem.h | 43 + compiler/debug/src_map_elem_test.cc | 53 + compiler/dex/inline_method_analyser.cc | 737 ++ compiler/dex/inline_method_analyser.h | 158 + compiler/dex/verification_results.cc | 167 + compiler/dex/verification_results.h | 90 + compiler/dex/verified_method.cc | 111 + 
compiler/dex/verified_method.h | 80 + compiler/driver/compiled_method_storage.cc | 268 + compiler/driver/compiled_method_storage.h | 131 + .../driver/compiled_method_storage_test.cc | 101 + compiler/driver/compiler_options.cc | 207 + compiler/driver/compiler_options.h | 481 + compiler/driver/compiler_options_map-inl.h | 214 + .../driver/compiler_options_map-storage.h | 48 + compiler/driver/compiler_options_map.def | 69 + compiler/driver/compiler_options_map.h | 47 + compiler/driver/dex_compilation_unit.cc | 88 + compiler/driver/dex_compilation_unit.h | 184 + compiler/driver/simple_compiler_options_map.h | 64 + compiler/exception_test.cc | 242 + compiler/jit/jit_compiler.cc | 206 + compiler/jit/jit_compiler.h | 74 + compiler/jit/jit_logger.cc | 308 + compiler/jit/jit_logger.h | 138 + compiler/jni/jni_cfi_test.cc | 147 + compiler/jni/jni_cfi_test_expected.inc | 330 + compiler/jni/jni_compiler_test.cc | 2244 ++++ .../jni/quick/arm/calling_convention_arm.cc | 552 + .../jni/quick/arm/calling_convention_arm.h | 96 + .../quick/arm64/calling_convention_arm64.cc | 425 + .../quick/arm64/calling_convention_arm64.h | 92 + compiler/jni/quick/calling_convention.cc | 367 + compiler/jni/quick/calling_convention.h | 478 + compiler/jni/quick/jni_compiler.cc | 800 ++ compiler/jni/quick/jni_compiler.h | 72 + .../jni/quick/x86/calling_convention_x86.cc | 339 + .../jni/quick/x86/calling_convention_x86.h | 95 + .../quick/x86_64/calling_convention_x86_64.cc | 350 + .../quick/x86_64/calling_convention_x86_64.h | 90 + compiler/linker/linker_patch.h | 311 + compiler/linker/linker_patch_test.cc | 170 + compiler/linker/output_stream_test.cc | 139 + compiler/optimizing/block_builder.cc | 490 + compiler/optimizing/block_builder.h | 95 + .../optimizing/bounds_check_elimination.cc | 1977 ++++ .../optimizing/bounds_check_elimination.h | 50 + .../bounds_check_elimination_test.cc | 1109 ++ compiler/optimizing/builder.cc | 224 + compiler/optimizing/builder.h | 83 + 
compiler/optimizing/cha_guard_optimization.cc | 255 + compiler/optimizing/cha_guard_optimization.h | 43 + compiler/optimizing/code_generator.cc | 1758 +++ compiler/optimizing/code_generator.h | 994 ++ compiler/optimizing/code_generator_arm64.cc | 6575 +++++++++++ compiler/optimizing/code_generator_arm64.h | 962 ++ .../optimizing/code_generator_arm_vixl.cc | 9758 ++++++++++++++++ compiler/optimizing/code_generator_arm_vixl.h | 934 ++ compiler/optimizing/code_generator_utils.cc | 103 + compiler/optimizing/code_generator_utils.h | 45 + .../optimizing/code_generator_vector_arm64.cc | 1523 +++ .../code_generator_vector_arm_vixl.cc | 1054 ++ .../optimizing/code_generator_vector_x86.cc | 1387 +++ .../code_generator_vector_x86_64.cc | 1360 +++ compiler/optimizing/code_generator_x86.cc | 8568 ++++++++++++++ compiler/optimizing/code_generator_x86.h | 696 ++ compiler/optimizing/code_generator_x86_64.cc | 7780 +++++++++++++ compiler/optimizing/code_generator_x86_64.h | 659 ++ compiler/optimizing/code_sinking.cc | 441 + compiler/optimizing/code_sinking.h | 50 + compiler/optimizing/codegen_test.cc | 892 ++ compiler/optimizing/codegen_test_utils.h | 341 + compiler/optimizing/common_arm.h | 225 + compiler/optimizing/common_arm64.h | 378 + compiler/optimizing/common_dominator.h | 97 + compiler/optimizing/constant_folding.cc | 391 + compiler/optimizing/constant_folding.h | 54 + compiler/optimizing/constant_folding_test.cc | 850 ++ ...onstructor_fence_redundancy_elimination.cc | 263 + ...constructor_fence_redundancy_elimination.h | 65 + compiler/optimizing/data_type-inl.h | 71 + compiler/optimizing/data_type.cc | 55 + compiler/optimizing/data_type.h | 289 + compiler/optimizing/data_type_test.cc | 116 + compiler/optimizing/dead_code_elimination.cc | 532 + compiler/optimizing/dead_code_elimination.h | 53 + .../optimizing/dead_code_elimination_test.cc | 181 + compiler/optimizing/dominator_test.cc | 271 + compiler/optimizing/escape.cc | 111 + compiler/optimizing/escape.h | 68 + 
compiler/optimizing/find_loops_test.cc | 338 + compiler/optimizing/graph_checker.cc | 1144 ++ compiler/optimizing/graph_checker.h | 133 + compiler/optimizing/graph_checker_test.cc | 140 + compiler/optimizing/graph_test.cc | 312 + compiler/optimizing/graph_visualizer.cc | 938 ++ compiler/optimizing/graph_visualizer.h | 120 + compiler/optimizing/gvn.cc | 566 + compiler/optimizing/gvn.h | 46 + compiler/optimizing/gvn_test.cc | 434 + compiler/optimizing/induction_var_analysis.cc | 1453 +++ compiler/optimizing/induction_var_analysis.h | 288 + .../optimizing/induction_var_analysis_test.cc | 1365 +++ compiler/optimizing/induction_var_range.cc | 1394 +++ compiler/optimizing/induction_var_range.h | 352 + .../optimizing/induction_var_range_test.cc | 1035 ++ compiler/optimizing/inliner.cc | 2268 ++++ compiler/optimizing/inliner.h | 349 + compiler/optimizing/instruction_builder.cc | 3175 ++++++ compiler/optimizing/instruction_builder.h | 348 + compiler/optimizing/instruction_simplifier.cc | 2999 +++++ compiler/optimizing/instruction_simplifier.h | 59 + .../optimizing/instruction_simplifier_arm.cc | 298 + .../optimizing/instruction_simplifier_arm.h | 39 + .../instruction_simplifier_arm64.cc | 288 + .../optimizing/instruction_simplifier_arm64.h | 39 + .../instruction_simplifier_shared.cc | 339 + .../instruction_simplifier_shared.h | 66 + .../optimizing/instruction_simplifier_x86.cc | 88 + .../optimizing/instruction_simplifier_x86.h | 44 + .../instruction_simplifier_x86_64.cc | 82 + .../instruction_simplifier_x86_64.h | 48 + .../instruction_simplifier_x86_shared.cc | 137 + .../instruction_simplifier_x86_shared.h | 29 + compiler/optimizing/intrinsic_objects.cc | 98 + compiler/optimizing/intrinsic_objects.h | 81 + compiler/optimizing/intrinsics.cc | 374 + compiler/optimizing/intrinsics.h | 348 + compiler/optimizing/intrinsics_arm64.cc | 3437 ++++++ compiler/optimizing/intrinsics_arm64.h | 92 + compiler/optimizing/intrinsics_arm_vixl.cc | 3113 ++++++ 
compiler/optimizing/intrinsics_arm_vixl.h | 82 + compiler/optimizing/intrinsics_utils.h | 87 + compiler/optimizing/intrinsics_x86.cc | 3124 ++++++ compiler/optimizing/intrinsics_x86.h | 84 + compiler/optimizing/intrinsics_x86_64.cc | 2791 +++++ compiler/optimizing/intrinsics_x86_64.h | 84 + compiler/optimizing/licm.cc | 175 + compiler/optimizing/licm.h | 48 + compiler/optimizing/licm_test.cc | 214 + compiler/optimizing/linear_order.cc | 133 + compiler/optimizing/linear_order.h | 51 + compiler/optimizing/linearize_test.cc | 245 + compiler/optimizing/live_interval_test.cc | 332 + compiler/optimizing/live_ranges_test.cc | 429 + compiler/optimizing/liveness_test.cc | 589 + compiler/optimizing/load_store_analysis.cc | 182 + compiler/optimizing/load_store_analysis.h | 620 ++ .../optimizing/load_store_analysis_test.cc | 675 ++ compiler/optimizing/load_store_elimination.cc | 948 ++ compiler/optimizing/load_store_elimination.h | 51 + .../optimizing/load_store_elimination_test.cc | 893 ++ compiler/optimizing/locations.cc | 110 + compiler/optimizing/locations.h | 703 ++ compiler/optimizing/loop_analysis.cc | 413 + compiler/optimizing/loop_analysis.h | 183 + compiler/optimizing/loop_optimization.cc | 2484 +++++ compiler/optimizing/loop_optimization.h | 339 + compiler/optimizing/loop_optimization_test.cc | 320 + compiler/optimizing/nodes.cc | 3259 ++++++ compiler/optimizing/nodes.h | 8130 ++++++++++++++ compiler/optimizing/nodes_shared.cc | 92 + compiler/optimizing/nodes_shared.h | 258 + compiler/optimizing/nodes_test.cc | 159 + compiler/optimizing/nodes_vector.h | 1168 ++ compiler/optimizing/nodes_vector_test.cc | 416 + compiler/optimizing/nodes_x86.h | 219 + compiler/optimizing/optimization.cc | 321 + compiler/optimizing/optimization.h | 150 + compiler/optimizing/optimizing_cfi_test.cc | 210 + .../optimizing_cfi_test_expected.inc | 250 + compiler/optimizing/optimizing_compiler.cc | 1448 +++ compiler/optimizing/optimizing_compiler.h | 38 + 
.../optimizing/optimizing_compiler_stats.h | 191 + compiler/optimizing/optimizing_unit_test.h | 309 + compiler/optimizing/parallel_move_resolver.cc | 564 + compiler/optimizing/parallel_move_resolver.h | 204 + compiler/optimizing/parallel_move_test.cc | 645 ++ compiler/optimizing/pc_relative_fixups_x86.cc | 266 + compiler/optimizing/pc_relative_fixups_x86.h | 46 + .../prepare_for_register_allocation.cc | 316 + .../prepare_for_register_allocation.h | 70 + compiler/optimizing/pretty_printer.h | 149 + compiler/optimizing/pretty_printer_test.cc | 249 + .../optimizing/reference_type_propagation.cc | 1060 ++ .../optimizing/reference_type_propagation.h | 121 + .../reference_type_propagation_test.cc | 161 + .../register_allocation_resolver.cc | 702 ++ .../optimizing/register_allocation_resolver.h | 99 + compiler/optimizing/register_allocator.cc | 290 + compiler/optimizing/register_allocator.h | 94 + .../register_allocator_graph_color.cc | 2077 ++++ .../register_allocator_graph_color.h | 192 + .../register_allocator_linear_scan.cc | 1194 ++ .../register_allocator_linear_scan.h | 181 + .../optimizing/register_allocator_test.cc | 942 ++ compiler/optimizing/scheduler.cc | 840 ++ compiler/optimizing/scheduler.h | 574 + compiler/optimizing/scheduler_arm.cc | 1159 ++ compiler/optimizing/scheduler_arm.h | 166 + compiler/optimizing/scheduler_arm64.cc | 353 + compiler/optimizing/scheduler_arm64.h | 179 + compiler/optimizing/scheduler_test.cc | 462 + compiler/optimizing/select_generator.cc | 226 + compiler/optimizing/select_generator.h | 82 + compiler/optimizing/select_generator_test.cc | 106 + compiler/optimizing/sharpening.cc | 396 + compiler/optimizing/sharpening.h | 57 + compiler/optimizing/side_effects_analysis.cc | 89 + compiler/optimizing/side_effects_analysis.h | 69 + compiler/optimizing/side_effects_test.cc | 266 + compiler/optimizing/ssa_builder.cc | 737 ++ compiler/optimizing/ssa_builder.h | 148 + compiler/optimizing/ssa_liveness_analysis.cc | 552 + 
compiler/optimizing/ssa_liveness_analysis.h | 1336 +++ .../optimizing/ssa_liveness_analysis_test.cc | 218 + compiler/optimizing/ssa_phi_elimination.cc | 262 + compiler/optimizing/ssa_phi_elimination.h | 66 + compiler/optimizing/ssa_test.cc | 535 + compiler/optimizing/stack_map_stream.cc | 344 + compiler/optimizing/stack_map_stream.h | 160 + compiler/optimizing/stack_map_test.cc | 771 ++ compiler/optimizing/superblock_cloner.cc | 1151 ++ compiler/optimizing/superblock_cloner.h | 463 + compiler/optimizing/superblock_cloner_test.cc | 754 ++ compiler/optimizing/suspend_check_test.cc | 95 + compiler/optimizing/x86_memory_gen.cc | 86 + compiler/optimizing/x86_memory_gen.h | 46 + compiler/trampolines/trampoline_compiler.cc | 216 + compiler/trampolines/trampoline_compiler.h | 48 + compiler/utils/arm/assembler_arm_shared.h | 46 + compiler/utils/arm/assembler_arm_vixl.cc | 504 + compiler/utils/arm/assembler_arm_vixl.h | 273 + compiler/utils/arm/constants_arm.cc | 32 + compiler/utils/arm/constants_arm.h | 135 + .../utils/arm/jni_macro_assembler_arm_vixl.cc | 744 ++ .../utils/arm/jni_macro_assembler_arm_vixl.h | 250 + compiler/utils/arm/managed_register_arm.cc | 103 + compiler/utils/arm/managed_register_arm.h | 275 + .../utils/arm/managed_register_arm_test.cc | 767 ++ compiler/utils/arm64/assembler_arm64.cc | 206 + compiler/utils/arm64/assembler_arm64.h | 172 + .../utils/arm64/jni_macro_assembler_arm64.cc | 835 ++ .../utils/arm64/jni_macro_assembler_arm64.h | 253 + .../utils/arm64/managed_register_arm64.cc | 108 + compiler/utils/arm64/managed_register_arm64.h | 226 + .../arm64/managed_register_arm64_test.cc | 741 ++ compiler/utils/assembler.cc | 105 + compiler/utils/assembler.h | 413 + compiler/utils/assembler_test.h | 1620 +++ compiler/utils/assembler_test_base.h | 614 + compiler/utils/assembler_thumb_test.cc | 368 + .../assembler_thumb_test_expected.cc.inc | 267 + compiler/utils/atomic_dex_ref_map-inl.h | 150 + compiler/utils/atomic_dex_ref_map.h | 85 + 
compiler/utils/atomic_dex_ref_map_test.cc | 74 + compiler/utils/dedupe_set-inl.h | 255 + compiler/utils/dedupe_set.h | 62 + compiler/utils/dedupe_set_test.cc | 91 + compiler/utils/jni_macro_assembler.cc | 92 + compiler/utils/jni_macro_assembler.h | 313 + compiler/utils/jni_macro_assembler_test.h | 153 + compiler/utils/label.h | 120 + compiler/utils/managed_register.h | 135 + compiler/utils/stack_checks.h | 43 + compiler/utils/swap_space.cc | 223 + compiler/utils/swap_space.h | 241 + compiler/utils/swap_space_test.cc | 83 + compiler/utils/x86/assembler_x86.cc | 4042 +++++++ compiler/utils/x86/assembler_x86.h | 957 ++ compiler/utils/x86/assembler_x86_test.cc | 1233 ++ compiler/utils/x86/constants_x86.h | 115 + compiler/utils/x86/jni_macro_assembler_x86.cc | 614 + compiler/utils/x86/jni_macro_assembler_x86.h | 187 + compiler/utils/x86/managed_register_x86.cc | 120 + compiler/utils/x86/managed_register_x86.h | 225 + .../utils/x86/managed_register_x86_test.cc | 360 + compiler/utils/x86_64/assembler_x86_64.cc | 5558 +++++++++ compiler/utils/x86_64/assembler_x86_64.h | 1078 ++ .../utils/x86_64/assembler_x86_64_test.cc | 2444 ++++ compiler/utils/x86_64/constants_x86_64.h | 142 + .../x86_64/jni_macro_assembler_x86_64.cc | 677 ++ .../utils/x86_64/jni_macro_assembler_x86_64.h | 213 + .../utils/x86_64/managed_register_x86_64.cc | 115 + .../utils/x86_64/managed_register_x86_64.h | 211 + .../x86_64/managed_register_x86_64_test.cc | 359 + dalvikvm/Android.bp | 61 + dalvikvm/dalvikvm.cc | 220 + dex2oat/Android.bp | 502 + dex2oat/common_compiler_driver_test.cc | 142 + dex2oat/common_compiler_driver_test.h | 74 + dex2oat/dex/dex_to_dex_compiler.cc | 678 ++ dex2oat/dex/dex_to_dex_compiler.h | 126 + dex2oat/dex/dex_to_dex_decompiler_test.cc | 115 + dex2oat/dex/quick_compiler_callbacks.cc | 68 + dex2oat/dex/quick_compiler_callbacks.h | 81 + dex2oat/dex2oat.cc | 3313 ++++++ dex2oat/dex2oat_image_test.cc | 667 ++ dex2oat/dex2oat_options.cc | 284 + dex2oat/dex2oat_options.def | 96 + 
dex2oat/dex2oat_options.h | 79 + dex2oat/dex2oat_test.cc | 2598 +++++ dex2oat/dex2oat_vdex_test.cc | 135 + dex2oat/driver/compiler_driver-inl.h | 104 + dex2oat/driver/compiler_driver.cc | 2908 +++++ dex2oat/driver/compiler_driver.h | 354 + dex2oat/driver/compiler_driver_test.cc | 369 + dex2oat/include/dex2oat_return_codes.h | 33 + .../linker/arm/relative_patcher_arm_base.cc | 551 + .../linker/arm/relative_patcher_arm_base.h | 160 + dex2oat/linker/arm/relative_patcher_thumb2.cc | 203 + dex2oat/linker/arm/relative_patcher_thumb2.h | 78 + .../arm/relative_patcher_thumb2_test.cc | 1397 +++ .../linker/arm64/relative_patcher_arm64.cc | 453 + dex2oat/linker/arm64/relative_patcher_arm64.h | 88 + .../arm64/relative_patcher_arm64_test.cc | 1502 +++ dex2oat/linker/elf_writer.cc | 64 + dex2oat/linker/elf_writer.h | 97 + dex2oat/linker/elf_writer_quick.cc | 335 + dex2oat/linker/elf_writer_quick.h | 39 + dex2oat/linker/elf_writer_test.cc | 143 + dex2oat/linker/image_test.cc | 180 + dex2oat/linker/image_test.h | 408 + dex2oat/linker/image_write_read_test.cc | 160 + dex2oat/linker/image_writer.cc | 3693 ++++++ dex2oat/linker/image_writer.h | 753 ++ dex2oat/linker/index_bss_mapping_encoder.h | 87 + .../linker/index_bss_mapping_encoder_test.cc | 104 + dex2oat/linker/multi_oat_relative_patcher.cc | 86 + dex2oat/linker/multi_oat_relative_patcher.h | 181 + .../linker/multi_oat_relative_patcher_test.cc | 316 + dex2oat/linker/oat_writer.cc | 4255 +++++++ dex2oat/linker/oat_writer.h | 563 + dex2oat/linker/oat_writer_test.cc | 907 ++ dex2oat/linker/relative_patcher.cc | 155 + dex2oat/linker/relative_patcher.h | 175 + dex2oat/linker/relative_patcher_test.h | 403 + dex2oat/linker/x86/relative_patcher_x86.cc | 73 + dex2oat/linker/x86/relative_patcher_x86.h | 44 + .../linker/x86/relative_patcher_x86_base.cc | 58 + .../linker/x86/relative_patcher_x86_base.h | 53 + .../linker/x86/relative_patcher_x86_test.cc | 177 + .../linker/x86_64/relative_patcher_x86_64.cc | 51 + 
.../linker/x86_64/relative_patcher_x86_64.h | 44 + .../x86_64/relative_patcher_x86_64_test.cc | 180 + dex2oat/verifier_deps_test.cc | 1493 +++ dexdump/Android.bp | 68 + dexdump/dexdump.cc | 1900 ++++ dexdump/dexdump.h | 62 + dexdump/dexdump_cfg.cc | 355 + dexdump/dexdump_cfg.h | 31 + dexdump/dexdump_main.cc | 157 + dexdump/dexdump_test.cc | 80 + dexlayout/Android.bp | 268 + dexlayout/compact_dex_writer.cc | 537 + dexlayout/compact_dex_writer.h | 185 + dexlayout/dex_container.h | 87 + dexlayout/dex_ir.cc | 176 + dexlayout/dex_ir.h | 1362 +++ dexlayout/dex_ir_builder.cc | 1263 +++ dexlayout/dex_ir_builder.h | 39 + dexlayout/dex_verify.cc | 1048 ++ dexlayout/dex_verify.h | 117 + dexlayout/dex_visualize.cc | 345 + dexlayout/dex_visualize.h | 45 + dexlayout/dex_writer.cc | 1009 ++ dexlayout/dex_writer.h | 286 + dexlayout/dexdiag.cc | 540 + dexlayout/dexdiag_test.cc | 146 + dexlayout/dexlayout.cc | 2021 ++++ dexlayout/dexlayout.h | 202 + dexlayout/dexlayout_main.cc | 235 + dexlayout/dexlayout_test.cc | 835 ++ dexlist/Android.bp | 57 + dexlist/dexlist.cc | 287 + dexlist/dexlist_test.cc | 81 + dexoptanalyzer/Android.bp | 74 + dexoptanalyzer/dexoptanalyzer.cc | 394 + dexoptanalyzer/dexoptanalyzer_test.cc | 338 + disassembler/Android.bp | 89 + disassembler/disassembler.cc | 83 + disassembler/disassembler.h | 103 + disassembler/disassembler_arm.cc | 253 + disassembler/disassembler_arm.h | 54 + disassembler/disassembler_arm64.cc | 126 + disassembler/disassembler_arm64.h | 92 + disassembler/disassembler_x86.cc | 1619 +++ disassembler/disassembler_x86.h | 52 + dt_fd_forward/Android.bp | 63 + ...ODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION | 0 dt_fd_forward/NOTICE | 30 + dt_fd_forward/README.md | 32 + dt_fd_forward/dt_fd_forward.cc | 789 ++ dt_fd_forward/dt_fd_forward.h | 156 + dt_fd_forward/export/Android.bp | 27 + dt_fd_forward/export/MODULE_LICENSE_APACHE2 | 0 dt_fd_forward/export/fd_transport.h | 75 + imgdiag/Android.bp | 88 + imgdiag/imgdiag.cc | 1969 ++++ 
imgdiag/imgdiag_test.cc | 138 + libartbase/Android.bp | 296 + libartbase/arch/instruction_set.cc | 122 + libartbase/arch/instruction_set.h | 303 + libartbase/arch/instruction_set_test.cc | 62 + libartbase/base/aborting.h | 31 + libartbase/base/allocator.cc | 99 + libartbase/base/allocator.h | 160 + libartbase/base/arena_allocator-inl.h | 34 + libartbase/base/arena_allocator.cc | 347 + libartbase/base/arena_allocator.h | 403 + libartbase/base/arena_allocator_test.cc | 353 + libartbase/base/arena_bit_vector.cc | 98 + libartbase/base/arena_bit_vector.h | 58 + libartbase/base/arena_containers.h | 244 + libartbase/base/arena_object.h | 72 + libartbase/base/array_ref.h | 209 + libartbase/base/array_slice.h | 171 + libartbase/base/atomic.h | 145 + libartbase/base/bit_field.h | 92 + libartbase/base/bit_field_test.cc | 37 + libartbase/base/bit_memory_region.h | 362 + libartbase/base/bit_memory_region_test.cc | 101 + libartbase/base/bit_string.h | 299 + libartbase/base/bit_string_test.cc | 168 + libartbase/base/bit_struct.h | 300 + libartbase/base/bit_struct_detail.h | 163 + libartbase/base/bit_struct_test.cc | 329 + libartbase/base/bit_table.h | 486 + libartbase/base/bit_table_test.cc | 195 + libartbase/base/bit_utils.h | 509 + libartbase/base/bit_utils_iterator.h | 113 + libartbase/base/bit_utils_test.cc | 523 + libartbase/base/bit_vector-inl.h | 100 + libartbase/base/bit_vector.cc | 375 + libartbase/base/bit_vector.h | 296 + libartbase/base/bit_vector_test.cc | 272 + libartbase/base/bounded_fifo.h | 75 + libartbase/base/casts.h | 192 + libartbase/base/common_art_test.cc | 638 ++ libartbase/base/common_art_test.h | 356 + libartbase/base/data_hash.h | 107 + libartbase/base/dchecked_vector.h | 228 + libartbase/base/debug_stack.h | 147 + libartbase/base/dumpable.h | 55 + libartbase/base/endian_utils.h | 108 + libartbase/base/enums.cc | 32 + libartbase/base/enums.h | 38 + libartbase/base/file_magic.cc | 64 + libartbase/base/file_magic.h | 38 + libartbase/base/file_utils.cc | 
497 + libartbase/base/file_utils.h | 124 + libartbase/base/file_utils_test.cc | 165 + libartbase/base/globals.h | 108 + libartbase/base/globals_unix.cc | 75 + libartbase/base/hash_map.h | 77 + libartbase/base/hash_set.h | 781 ++ libartbase/base/hash_set_test.cc | 384 + libartbase/base/hex_dump.cc | 112 + libartbase/base/hex_dump.h | 55 + libartbase/base/hex_dump_test.cc | 72 + libartbase/base/hiddenapi_domain.h | 41 + libartbase/base/hiddenapi_flags.cc | 27 + libartbase/base/hiddenapi_flags.h | 358 + libartbase/base/hiddenapi_stubs.h | 64 + libartbase/base/histogram-inl.h | 283 + libartbase/base/histogram.h | 134 + libartbase/base/histogram_test.cc | 270 + libartbase/base/indenter.h | 167 + libartbase/base/indenter_test.cc | 40 + libartbase/base/intrusive_forward_list.h | 477 + .../base/intrusive_forward_list_test.cc | 779 ++ libartbase/base/iteration_range.h | 75 + libartbase/base/leb128.h | 377 + libartbase/base/leb128_test.cc | 310 + libartbase/base/length_prefixed_array.h | 121 + libartbase/base/logging.cc | 223 + libartbase/base/logging.h | 154 + libartbase/base/logging_test.cc | 60 + libartbase/base/macros.h | 107 + libartbase/base/malloc_arena_pool.cc | 162 + libartbase/base/malloc_arena_pool.h | 48 + libartbase/base/mem_map.cc | 1297 +++ libartbase/base/mem_map.h | 427 + libartbase/base/mem_map_fuchsia.cc | 144 + libartbase/base/mem_map_test.cc | 902 ++ libartbase/base/mem_map_unix.cc | 35 + libartbase/base/mem_map_windows.cc | 140 + libartbase/base/membarrier.cc | 87 + libartbase/base/membarrier.h | 51 + libartbase/base/membarrier_test.cc | 111 + libartbase/base/memfd.cc | 132 + libartbase/base/memfd.h | 77 + libartbase/base/memfd_test.cc | 30 + libartbase/base/memory_region.cc | 32 + libartbase/base/memory_region.h | 163 + libartbase/base/memory_region_test.cc | 58 + libartbase/base/memory_tool.h | 91 + libartbase/base/mman.h | 43 + libartbase/base/os.h | 63 + libartbase/base/os_linux.cc | 106 + libartbase/base/runtime_debug.cc | 74 + 
libartbase/base/runtime_debug.h | 61 + libartbase/base/safe_copy.cc | 83 + libartbase/base/safe_copy.h | 31 + libartbase/base/safe_copy_test.cc | 111 + libartbase/base/safe_map.h | 184 + libartbase/base/scoped_arena_allocator.cc | 195 + libartbase/base/scoped_arena_allocator.h | 192 + libartbase/base/scoped_arena_containers.h | 283 + libartbase/base/scoped_flock.cc | 151 + libartbase/base/scoped_flock.h | 88 + libartbase/base/scoped_flock_test.cc | 62 + libartbase/base/sdk_version.h | 58 + libartbase/base/socket_peer_is_trusted.cc | 52 + libartbase/base/socket_peer_is_trusted.h | 27 + libartbase/base/stats.h | 60 + libartbase/base/stl_util.h | 228 + libartbase/base/stl_util_identity.h | 41 + libartbase/base/stride_iterator.h | 150 + libartbase/base/string_view_cpp20.h | 40 + libartbase/base/strlcpy.h | 38 + libartbase/base/systrace.h | 103 + libartbase/base/time_utils.cc | 255 + libartbase/base/time_utils.h | 106 + libartbase/base/time_utils_test.cc | 58 + libartbase/base/to_str.h | 50 + libartbase/base/tracking_safe_map.h | 32 + libartbase/base/transform_array_ref.h | 196 + libartbase/base/transform_array_ref_test.cc | 206 + libartbase/base/transform_iterator.h | 178 + libartbase/base/transform_iterator_test.cc | 530 + libartbase/base/unix_file/README | 15 + libartbase/base/unix_file/fd_file.cc | 645 ++ libartbase/base/unix_file/fd_file.h | 169 + libartbase/base/unix_file/fd_file_test.cc | 292 + .../base/unix_file/random_access_file.h | 68 + .../base/unix_file/random_access_file_test.h | 183 + .../unix_file/random_access_file_utils.cc | 40 + .../base/unix_file/random_access_file_utils.h | 30 + libartbase/base/utils.cc | 377 + libartbase/base/utils.h | 167 + libartbase/base/utils_test.cc | 118 + libartbase/base/value_object.h | 31 + libartbase/base/variant_map.h | 470 + libartbase/base/variant_map_test.cc | 189 + libartbase/base/zip_archive.cc | 274 + libartbase/base/zip_archive.h | 107 + libartbase/base/zip_archive_test.cc | 67 + libartbase/libartbase.map | 15 + 
libartimagevalues/Android.bp | 17 + libartimagevalues/art_image_values.cpp | 37 + libartimagevalues/art_image_values.h | 34 + libartpalette/Android.bp | 105 + libartpalette/apex/palette.cc | 166 + libartpalette/apex/palette_test.cc | 64 + libartpalette/include/palette/palette.h | 38 + .../include/palette/palette_method_list.h | 36 + libartpalette/include/palette/palette_types.h | 39 + libartpalette/libartpalette.map.txt | 33 + libartpalette/system/palette_fake.cc | 89 + libartpalette/system/palette_system.h | 37 + libdexfile/Android.bp | 459 + libdexfile/dex/art_dex_file_loader.cc | 569 + libdexfile/dex/art_dex_file_loader.h | 156 + libdexfile/dex/art_dex_file_loader_test.cc | 315 + libdexfile/dex/base64_test_util.h | 100 + libdexfile/dex/bytecode_utils.h | 147 + libdexfile/dex/class_accessor-inl.h | 247 + libdexfile/dex/class_accessor.h | 399 + libdexfile/dex/class_accessor_test.cc | 90 + libdexfile/dex/class_iterator.h | 101 + libdexfile/dex/class_reference.h | 42 + libdexfile/dex/code_item_accessors-inl.h | 248 + libdexfile/dex/code_item_accessors.h | 190 + libdexfile/dex/code_item_accessors_test.cc | 114 + libdexfile/dex/compact_dex_file.cc | 108 + libdexfile/dex/compact_dex_file.h | 305 + libdexfile/dex/compact_dex_file_test.cc | 101 + libdexfile/dex/compact_dex_level.h | 49 + libdexfile/dex/compact_dex_utils.h | 37 + libdexfile/dex/compact_offset_table.cc | 133 + libdexfile/dex/compact_offset_table.h | 69 + libdexfile/dex/compact_offset_table_test.cc | 89 + libdexfile/dex/descriptors_names.cc | 418 + libdexfile/dex/descriptors_names.h | 63 + libdexfile/dex/descriptors_names_test.cc | 108 + libdexfile/dex/dex_file-inl.h | 472 + libdexfile/dex/dex_file.cc | 689 ++ libdexfile/dex/dex_file.h | 998 ++ libdexfile/dex/dex_file_exception_helpers.cc | 105 + libdexfile/dex/dex_file_exception_helpers.h | 74 + libdexfile/dex/dex_file_layout.cc | 113 + libdexfile/dex/dex_file_layout.h | 127 + libdexfile/dex/dex_file_loader.cc | 510 + libdexfile/dex/dex_file_loader.h | 203 
+ libdexfile/dex/dex_file_loader_test.cc | 487 + libdexfile/dex/dex_file_reference.h | 52 + libdexfile/dex/dex_file_structs.h | 298 + libdexfile/dex/dex_file_tracking_registrar.cc | 246 + libdexfile/dex/dex_file_tracking_registrar.h | 81 + libdexfile/dex/dex_file_types.h | 118 + libdexfile/dex/dex_file_verifier.cc | 3702 ++++++ libdexfile/dex/dex_file_verifier.h | 40 + libdexfile/dex/dex_file_verifier_test.cc | 2175 ++++ libdexfile/dex/dex_instruction-inl.h | 606 + libdexfile/dex/dex_instruction.cc | 571 + libdexfile/dex/dex_instruction.h | 764 ++ libdexfile/dex/dex_instruction_iterator.h | 236 + libdexfile/dex/dex_instruction_list.h | 308 + libdexfile/dex/dex_instruction_test.cc | 178 + libdexfile/dex/dex_instruction_utils.h | 219 + libdexfile/dex/invoke_type.h | 39 + libdexfile/dex/method_reference.h | 91 + libdexfile/dex/modifiers.cc | 58 + libdexfile/dex/modifiers.h | 157 + libdexfile/dex/primitive.cc | 73 + libdexfile/dex/primitive.h | 226 + libdexfile/dex/primitive_test.cc | 123 + libdexfile/dex/signature-inl.h | 82 + libdexfile/dex/signature.cc | 90 + libdexfile/dex/signature.h | 69 + libdexfile/dex/standard_dex_file.cc | 88 + libdexfile/dex/standard_dex_file.h | 141 + libdexfile/dex/string_reference.h | 71 + libdexfile/dex/string_reference_test.cc | 110 + libdexfile/dex/test_dex_file_builder.h | 401 + libdexfile/dex/test_dex_file_builder_test.cc | 86 + libdexfile/dex/type_lookup_table.cc | 176 + libdexfile/dex/type_lookup_table.h | 181 + libdexfile/dex/type_lookup_table_test.cc | 62 + libdexfile/dex/type_reference.h | 55 + libdexfile/dex/utf-inl.h | 99 + libdexfile/dex/utf.cc | 324 + libdexfile/dex/utf.h | 135 + libdexfile/dex/utf_test.cc | 408 + libdexfile/external/dex_file_ext.cc | 342 + libdexfile/external/dex_file_ext_c_test.c | 55 + libdexfile/external/dex_file_supp.cc | 118 + libdexfile/external/dex_file_supp_test.cc | 300 + .../include/art_api/dex_file_external.h | 89 + .../include/art_api/dex_file_support.h | 241 + 
.../external/libdexfile_external.map.txt | 13 + libelffile/Android.bp | 60 + libelffile/dwarf/debug_abbrev_writer.h | 98 + libelffile/dwarf/debug_frame_opcode_writer.h | 343 + libelffile/dwarf/debug_info_entry_writer.h | 228 + libelffile/dwarf/debug_line_opcode_writer.h | 261 + libelffile/dwarf/dwarf_constants.h | 686 ++ libelffile/dwarf/expression.h | 121 + libelffile/dwarf/headers.h | 162 + libelffile/dwarf/register.h | 62 + libelffile/dwarf/writer.h | 184 + libelffile/elf/elf_builder.h | 976 ++ libelffile/elf/elf_debug_reader.h | 174 + libelffile/elf/elf_utils.h | 204 + libelffile/elf/xz_utils.cc | 132 + libelffile/elf/xz_utils.h | 31 + libelffile/stream/buffered_output_stream.cc | 70 + libelffile/stream/buffered_output_stream.h | 54 + .../stream/error_delaying_output_stream.h | 103 + libelffile/stream/file_output_stream.cc | 40 + libelffile/stream/file_output_stream.h | 46 + libelffile/stream/output_stream.cc | 31 + libelffile/stream/output_stream.h | 64 + libelffile/stream/vector_output_stream.cc | 48 + libelffile/stream/vector_output_stream.h | 69 + libnativebridge/.clang-format | 1 + libnativebridge/Android.bp | 79 + libnativebridge/OWNERS | 4 + .../include/nativebridge/native_bridge.h | 426 + libnativebridge/libnativebridge.map.txt | 46 + libnativebridge/native_bridge.cc | 662 ++ libnativebridge/native_bridge_lazy.cc | 172 + libnativebridge/tests/Android.bp | 163 + .../tests/CodeCacheCreate_test.cpp | 51 + .../tests/CodeCacheExists_test.cpp | 54 + .../tests/CodeCacheStatFail_test.cpp | 51 + libnativebridge/tests/CompleteFlow_test.cpp | 47 + libnativebridge/tests/DummyNativeBridge.cpp | 53 + libnativebridge/tests/DummyNativeBridge2.cpp | 76 + libnativebridge/tests/DummyNativeBridge3.cpp | 124 + libnativebridge/tests/DummyNativeBridge6.cpp | 133 + .../tests/InvalidCharsNativeBridge_test.cpp | 40 + .../tests/NativeBridge2Signal_test.cpp | 42 + .../NativeBridge3CreateNamespace_test.cpp | 40 + .../tests/NativeBridge3GetError_test.cpp | 39 + 
...tiveBridge3InitAnonymousNamespace_test.cpp | 39 + .../NativeBridge3IsPathSupported_test.cpp | 39 + .../NativeBridge3LoadLibraryExt_test.cpp | 39 + .../tests/NativeBridge3UnloadLibrary_test.cpp | 39 + .../tests/NativeBridge6PreZygoteFork_lib.cpp | 29 + .../tests/NativeBridge6PreZygoteFork_lib.h | 27 + .../tests/NativeBridge6PreZygoteFork_test.cpp | 40 + libnativebridge/tests/NativeBridgeApi.c | 25 + libnativebridge/tests/NativeBridgeTest.h | 39 + .../tests/NativeBridgeVersion_test.cpp | 38 + .../tests/NeedsNativeBridge_test.cpp | 36 + .../PreInitializeNativeBridgeFail1_test.cpp | 40 + .../PreInitializeNativeBridgeFail2_test.cpp | 39 + .../tests/PreInitializeNativeBridge_test.cpp | 68 + .../tests/ReSetupNativeBridge_test.cpp | 30 + .../tests/UnavailableNativeBridge_test.cpp | 29 + .../tests/ValidNameNativeBridge_test.cpp | 35 + libnativeloader/.clang-format | 1 + libnativeloader/Android.bp | 116 + libnativeloader/OWNERS | 6 + libnativeloader/README.md | 84 + libnativeloader/TEST_MAPPING | 12 + .../include/nativeloader/dlext_namespaces.h | 114 + .../include/nativeloader/native_loader.h | 78 + libnativeloader/libnativeloader.map.txt | 31 + libnativeloader/library_namespaces.cpp | 359 + libnativeloader/library_namespaces.h | 72 + libnativeloader/native_loader.cpp | 254 + libnativeloader/native_loader_lazy.cpp | 102 + libnativeloader/native_loader_namespace.cpp | 177 + libnativeloader/native_loader_namespace.h | 78 + libnativeloader/native_loader_test.cpp | 678 ++ libnativeloader/public_libraries.cpp | 452 + libnativeloader/public_libraries.h | 69 + libnativeloader/test/Android.bp | 85 + libnativeloader/test/Android.mk | 57 + libnativeloader/test/api_test.c | 25 + .../test/public.libraries-oem1.txt | 2 + .../test/public.libraries-oem2.txt | 2 + .../test/public.libraries-product1.txt | 2 + libnativeloader/test/runtest.sh | 11 + .../src/android/test/app/TestActivity.java | 44 + .../test/system/AndroidManifest.xml | 31 + libnativeloader/test/test.cpp | 21 + 
.../test/vendor/AndroidManifest.xml | 31 + libnativeloader/utils.h | 30 + libprofile/Android.bp | 193 + libprofile/profile/profile_boot_info.cc | 125 + libprofile/profile/profile_boot_info.h | 71 + libprofile/profile/profile_boot_info_test.cc | 113 + .../profile/profile_compilation_info.cc | 2395 ++++ libprofile/profile/profile_compilation_info.h | 1056 ++ .../profile/profile_compilation_info_test.cc | 1847 +++ libprofile/profile/profile_helpers.h | 56 + oatdump/Android.bp | 146 + oatdump/Android.mk | 65 + oatdump/oatdump.cc | 3680 ++++++ oatdump/oatdump_app_test.cc | 54 + oatdump/oatdump_image_test.cc | 50 + oatdump/oatdump_test.cc | 115 + oatdump/oatdump_test.h | 382 + openjdkjvm/Android.bp | 56 + ...ODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION | 0 openjdkjvm/NOTICE | 29 + openjdkjvm/OpenjdkJvm.cc | 481 + openjdkjvmti/Android.bp | 107 + ...ODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION | 0 openjdkjvmti/NOTICE | 29 + openjdkjvmti/OpenjdkJvmTi.cc | 1715 +++ openjdkjvmti/README.md | 7 + openjdkjvmti/alloc_manager.cc | 218 + openjdkjvmti/alloc_manager.h | 115 + openjdkjvmti/art_jvmti.h | 349 + openjdkjvmti/deopt_manager.cc | 503 + openjdkjvmti/deopt_manager.h | 202 + openjdkjvmti/events-inl.h | 701 ++ openjdkjvmti/events.cc | 1687 +++ openjdkjvmti/events.h | 453 + openjdkjvmti/fixed_up_dex_file.cc | 161 + openjdkjvmti/fixed_up_dex_file.h | 83 + openjdkjvmti/include/CPPLINT.cfg | 18 + openjdkjvmti/include/jvmti.h | 2534 +++++ openjdkjvmti/jvmti_allocator.h | 175 + openjdkjvmti/jvmti_weak_table-inl.h | 407 + openjdkjvmti/jvmti_weak_table.h | 227 + openjdkjvmti/object_tagging.cc | 99 + openjdkjvmti/object_tagging.h | 107 + openjdkjvmti/ti_allocator.cc | 95 + openjdkjvmti/ti_allocator.h | 65 + openjdkjvmti/ti_breakpoint.cc | 194 + openjdkjvmti/ti_breakpoint.h | 98 + openjdkjvmti/ti_class.cc | 1140 ++ openjdkjvmti/ti_class.h | 100 + openjdkjvmti/ti_class_definition.cc | 373 + openjdkjvmti/ti_class_definition.h | 207 + openjdkjvmti/ti_class_loader-inl.h | 74 + 
openjdkjvmti/ti_class_loader.cc | 190 + openjdkjvmti/ti_class_loader.h | 99 + openjdkjvmti/ti_ddms.cc | 90 + openjdkjvmti/ti_ddms.h | 53 + openjdkjvmti/ti_dump.cc | 93 + openjdkjvmti/ti_dump.h | 52 + openjdkjvmti/ti_extension.cc | 711 ++ openjdkjvmti/ti_extension.h | 60 + openjdkjvmti/ti_field.cc | 339 + openjdkjvmti/ti_field.h | 81 + openjdkjvmti/ti_heap.cc | 1984 ++++ openjdkjvmti/ti_heap.h | 113 + openjdkjvmti/ti_jni.cc | 91 + openjdkjvmti/ti_jni.h | 58 + openjdkjvmti/ti_logging.cc | 72 + openjdkjvmti/ti_logging.h | 101 + openjdkjvmti/ti_method.cc | 1294 +++ openjdkjvmti/ti_method.h | 114 + openjdkjvmti/ti_monitor.cc | 437 + openjdkjvmti/ti_monitor.h | 63 + openjdkjvmti/ti_object.cc | 133 + openjdkjvmti/ti_object.h | 51 + openjdkjvmti/ti_phase.cc | 159 + openjdkjvmti/ti_phase.h | 69 + openjdkjvmti/ti_properties.cc | 236 + openjdkjvmti/ti_properties.h | 51 + openjdkjvmti/ti_redefine.cc | 3153 ++++++ openjdkjvmti/ti_redefine.h | 379 + openjdkjvmti/ti_search.cc | 408 + openjdkjvmti/ti_search.h | 59 + openjdkjvmti/ti_stack.cc | 1436 +++ openjdkjvmti/ti_stack.h | 131 + openjdkjvmti/ti_thread.cc | 1142 ++ openjdkjvmti/ti_thread.h | 192 + openjdkjvmti/ti_threadgroup.cc | 281 + openjdkjvmti/ti_threadgroup.h | 60 + openjdkjvmti/ti_timers.cc | 93 + openjdkjvmti/ti_timers.h | 51 + openjdkjvmti/transform.cc | 377 + openjdkjvmti/transform.h | 73 + perfetto_hprof/Android.bp | 99 + perfetto_hprof/perfetto_hprof.cc | 784 ++ perfetto_hprof/perfetto_hprof.h | 42 + profman/Android.bp | 102 + profman/boot_image_profile.cc | 284 + profman/boot_image_profile.h | 85 + profman/profile_assistant.cc | 213 + profman/profile_assistant.h | 108 + profman/profile_assistant_test.cc | 1581 +++ profman/profman.cc | 1568 +++ runtime/Android.bp | 768 ++ runtime/aot_class_linker.cc | 246 + runtime/aot_class_linker.h | 73 + runtime/arch/arch_test.cc | 147 + runtime/arch/arm/asm_support_arm.S | 272 + runtime/arch/arm/asm_support_arm.h | 64 + runtime/arch/arm/callee_save_frame_arm.h | 111 + 
runtime/arch/arm/context_arm.cc | 118 + runtime/arch/arm/context_arm.h | 98 + runtime/arch/arm/entrypoints_init_arm.cc | 201 + runtime/arch/arm/fault_handler_arm.cc | 237 + .../arch/arm/instruction_set_features_arm.cc | 346 + .../arch/arm/instruction_set_features_arm.h | 112 + .../arm/instruction_set_features_arm_test.cc | 127 + .../instruction_set_features_assembly_tests.S | 65 + runtime/arch/arm/jni_entrypoints_arm.S | 249 + runtime/arch/arm/jni_frame_arm.h | 72 + runtime/arch/arm/memcmp16_arm.S | 236 + runtime/arch/arm/quick_entrypoints_arm.S | 2698 +++++ runtime/arch/arm/quick_entrypoints_cc_arm.cc | 112 + runtime/arch/arm/registers_arm.cc | 47 + runtime/arch/arm/registers_arm.h | 98 + runtime/arch/arm/thread_arm.cc | 37 + runtime/arch/arm64/asm_support_arm64.S | 340 + runtime/arch/arm64/asm_support_arm64.h | 51 + runtime/arch/arm64/callee_save_frame_arm64.h | 134 + runtime/arch/arm64/context_arm64.cc | 155 + runtime/arch/arm64/context_arm64.h | 102 + runtime/arch/arm64/entrypoints_init_arm64.cc | 197 + runtime/arch/arm64/fault_handler_arm64.cc | 193 + .../arm64/instruction_set_features_arm64.cc | 415 + .../arm64/instruction_set_features_arm64.h | 149 + .../instruction_set_features_arm64_test.cc | 233 + runtime/arch/arm64/jni_entrypoints_arm64.S | 232 + runtime/arch/arm64/jni_frame_arm64.h | 79 + runtime/arch/arm64/memcmp16_arm64.S | 143 + runtime/arch/arm64/quick_entrypoints_arm64.S | 2685 +++++ runtime/arch/arm64/registers_arm64.cc | 75 + runtime/arch/arm64/registers_arm64.h | 194 + runtime/arch/arm64/thread_arm64.cc | 37 + runtime/arch/context-inl.h | 49 + runtime/arch/context.cc | 25 + runtime/arch/context.h | 108 + runtime/arch/instruction_set_features.cc | 262 + runtime/arch/instruction_set_features.h | 146 + runtime/arch/instruction_set_features_test.cc | 307 + runtime/arch/memcmp16.cc | 44 + runtime/arch/memcmp16.h | 66 + runtime/arch/memcmp16_test.cc | 167 + runtime/arch/quick_alloc_entrypoints.S | 253 + runtime/arch/stub_test.cc | 1991 ++++ 
runtime/arch/x86/asm_support_x86.S | 274 + runtime/arch/x86/asm_support_x86.h | 29 + runtime/arch/x86/callee_save_frame_x86.h | 98 + runtime/arch/x86/context_x86.cc | 132 + runtime/arch/x86/context_x86.h | 111 + runtime/arch/x86/entrypoints_init_x86.cc | 130 + runtime/arch/x86/fault_handler_x86.cc | 475 + .../arch/x86/instruction_set_features_x86.cc | 349 + .../arch/x86/instruction_set_features_x86.h | 153 + .../x86/instruction_set_features_x86_test.cc | 182 + runtime/arch/x86/jni_entrypoints_x86.S | 223 + runtime/arch/x86/jni_frame_x86.h | 68 + runtime/arch/x86/memcmp16_x86.S | 1038 ++ runtime/arch/x86/quick_entrypoints_x86.S | 2528 +++++ runtime/arch/x86/registers_x86.cc | 37 + runtime/arch/x86/registers_x86.h | 59 + runtime/arch/x86/thread_x86.cc | 194 + runtime/arch/x86_64/asm_support_x86_64.S | 435 + runtime/arch/x86_64/asm_support_x86_64.h | 29 + .../arch/x86_64/callee_save_frame_x86_64.h | 109 + runtime/arch/x86_64/context_x86_64.cc | 130 + runtime/arch/x86_64/context_x86_64.h | 102 + .../arch/x86_64/entrypoints_init_x86_64.cc | 145 + .../x86_64/instruction_set_features_x86_64.h | 98 + .../instruction_set_features_x86_64_test.cc | 35 + runtime/arch/x86_64/jni_entrypoints_x86_64.S | 262 + runtime/arch/x86_64/jni_frame_x86_64.h | 85 + runtime/arch/x86_64/memcmp16_x86_64.S | 1210 ++ .../arch/x86_64/quick_entrypoints_x86_64.S | 2284 ++++ runtime/arch/x86_64/registers_x86_64.cc | 47 + runtime/arch/x86_64/registers_x86_64.h | 74 + runtime/arch/x86_64/thread_x86_64.cc | 82 + runtime/art_field-inl.h | 412 + runtime/art_field.cc | 78 + runtime/art_field.h | 261 + runtime/art_method-inl.h | 434 + runtime/art_method.cc | 889 ++ runtime/art_method.h | 930 ++ runtime/asm_support.h | 26 + runtime/backtrace_helper.cc | 139 + runtime/backtrace_helper.h | 75 + runtime/barrier.cc | 114 + runtime/barrier.h | 95 + runtime/barrier_test.cc | 130 + runtime/base/callee_save_type.h | 47 + runtime/base/locks.cc | 402 + runtime/base/locks.h | 387 + runtime/base/mem_map_arena_pool.cc | 
165 + runtime/base/mem_map_arena_pool.h | 49 + runtime/base/mutator_locked_dumpable.h | 56 + runtime/base/mutex-inl.h | 314 + runtime/base/mutex.cc | 1089 ++ runtime/base/mutex.h | 539 + runtime/base/mutex_test.cc | 174 + runtime/base/quasi_atomic.cc | 67 + runtime/base/quasi_atomic.h | 178 + runtime/base/timing_logger.cc | 258 + runtime/base/timing_logger.h | 225 + runtime/base/timing_logger_test.cc | 178 + runtime/cha.cc | 720 ++ runtime/cha.h | 179 + runtime/cha_test.cc | 93 + runtime/check_reference_map_visitor.h | 120 + runtime/class_linker-inl.h | 485 + runtime/class_linker.cc | 9883 ++++++++++++++++ runtime/class_linker.h | 1501 +++ runtime/class_linker_test.cc | 1775 +++ runtime/class_loader_context.cc | 1436 +++ runtime/class_loader_context.h | 372 + runtime/class_loader_context_test.cc | 1680 +++ runtime/class_loader_utils.h | 185 + runtime/class_root.cc | 36 + runtime/class_root.h | 187 + runtime/class_status.h | 106 + runtime/class_table-inl.h | 147 + runtime/class_table.cc | 303 + runtime/class_table.h | 302 + runtime/class_table_test.cc | 163 + runtime/common_dex_operations.h | 259 + runtime/common_runtime_test.cc | 674 ++ runtime/common_runtime_test.h | 323 + runtime/common_throws.cc | 904 ++ runtime/common_throws.h | 287 + runtime/compiler_callbacks.h | 88 + runtime/compiler_filter.cc | 247 + runtime/compiler_filter.h | 112 + runtime/compiler_filter_test.cc | 70 + runtime/debug_print.cc | 188 + runtime/debug_print.h | 37 + runtime/debugger.cc | 1107 ++ runtime/debugger.h | 156 + runtime/deoptimization_kind.h | 55 + runtime/dex/dex_file_annotations.cc | 1819 +++ runtime/dex/dex_file_annotations.h | 171 + runtime/dex2oat_environment_test.h | 242 + runtime/dex_reference_collection.h | 85 + runtime/dex_register_location.cc | 50 + runtime/dex_register_location.h | 93 + runtime/dex_to_dex_decompiler.cc | 208 + runtime/dex_to_dex_decompiler.h | 46 + runtime/dexopt_test.cc | 235 + runtime/dexopt_test.h | 88 + runtime/elf_file.cc | 1904 ++++ 
runtime/elf_file.h | 119 + runtime/elf_file_impl.h | 235 + runtime/entrypoints/entrypoint_utils-inl.h | 772 ++ runtime/entrypoints/entrypoint_utils.cc | 284 + runtime/entrypoints/entrypoint_utils.h | 223 + runtime/entrypoints/jni/jni_entrypoints.cc | 80 + runtime/entrypoints/jni/jni_entrypoints.h | 40 + runtime/entrypoints/math_entrypoints.cc | 51 + runtime/entrypoints/math_entrypoints.h | 29 + runtime/entrypoints/math_entrypoints_test.cc | 75 + runtime/entrypoints/quick/callee_save_frame.h | 90 + .../quick/quick_alloc_entrypoints.cc | 261 + .../quick/quick_alloc_entrypoints.h | 43 + .../quick/quick_cast_entrypoints.cc | 38 + .../entrypoints/quick/quick_default_externs.h | 136 + .../quick/quick_default_init_entrypoints.h | 136 + .../quick/quick_deoptimization_entrypoints.cc | 80 + .../quick/quick_dexcache_entrypoints.cc | 222 + runtime/entrypoints/quick/quick_entrypoints.h | 118 + .../quick/quick_entrypoints_enum.cc | 127 + .../quick/quick_entrypoints_enum.h | 69 + .../quick/quick_entrypoints_list.h | 213 + .../quick/quick_field_entrypoints.cc | 407 + .../quick/quick_fillarray_entrypoints.cc | 36 + .../quick/quick_jni_entrypoints.cc | 280 + .../quick/quick_lock_entrypoints.cc | 65 + .../quick/quick_math_entrypoints.cc | 82 + ...quick_string_builder_append_entrypoints.cc | 30 + .../quick/quick_thread_entrypoints.cc | 37 + .../quick/quick_throw_entrypoints.cc | 159 + .../quick/quick_trampoline_entrypoints.cc | 2699 +++++ .../quick_trampoline_entrypoints_test.cc | 100 + runtime/entrypoints/runtime_asm_entrypoints.h | 105 + runtime/entrypoints_order_test.cc | 428 + runtime/exec_utils.cc | 101 + runtime/exec_utils.h | 34 + runtime/exec_utils_test.cc | 105 + runtime/experimental_flags.h | 84 + runtime/fault_handler.cc | 405 + runtime/fault_handler.h | 150 + runtime/gc/accounting/atomic_stack.h | 294 + runtime/gc/accounting/bitmap-inl.h | 152 + runtime/gc/accounting/bitmap.cc | 105 + runtime/gc/accounting/bitmap.h | 194 + runtime/gc/accounting/card_table-inl.h | 241 + 
runtime/gc/accounting/card_table.cc | 141 + runtime/gc/accounting/card_table.h | 179 + runtime/gc/accounting/card_table_test.cc | 151 + runtime/gc/accounting/heap_bitmap-inl.h | 121 + runtime/gc/accounting/heap_bitmap.cc | 59 + runtime/gc/accounting/heap_bitmap.h | 86 + runtime/gc/accounting/mod_union_table-inl.h | 45 + runtime/gc/accounting/mod_union_table.cc | 611 + runtime/gc/accounting/mod_union_table.h | 206 + runtime/gc/accounting/mod_union_table_test.cc | 270 + runtime/gc/accounting/read_barrier_table.h | 123 + runtime/gc/accounting/remembered_set.cc | 187 + runtime/gc/accounting/remembered_set.h | 87 + runtime/gc/accounting/space_bitmap-inl.h | 226 + runtime/gc/accounting/space_bitmap.cc | 221 + runtime/gc/accounting/space_bitmap.h | 274 + runtime/gc/accounting/space_bitmap_test.cc | 262 + runtime/gc/allocation_listener.h | 71 + runtime/gc/allocation_record.cc | 257 + runtime/gc/allocation_record.h | 312 + runtime/gc/allocator/dlmalloc.cc | 106 + runtime/gc/allocator/dlmalloc.h | 61 + runtime/gc/allocator/rosalloc-inl.h | 126 + runtime/gc/allocator/rosalloc.cc | 2194 ++++ runtime/gc/allocator/rosalloc.h | 953 ++ runtime/gc/allocator_type.h | 49 + runtime/gc/collector/concurrent_copying-inl.h | 275 + runtime/gc/collector/concurrent_copying.cc | 3870 +++++++ runtime/gc/collector/concurrent_copying.h | 513 + runtime/gc/collector/garbage_collector.cc | 327 + runtime/gc/collector/garbage_collector.h | 175 + runtime/gc/collector/gc_type.h | 46 + runtime/gc/collector/immune_region.cc | 38 + runtime/gc/collector/immune_region.h | 86 + runtime/gc/collector/immune_spaces.cc | 129 + runtime/gc/collector/immune_spaces.h | 95 + runtime/gc/collector/immune_spaces_test.cc | 405 + runtime/gc/collector/iteration.h | 101 + runtime/gc/collector/mark_sweep-inl.h | 61 + runtime/gc/collector/mark_sweep.cc | 1515 +++ runtime/gc/collector/mark_sweep.h | 378 + runtime/gc/collector/object_byte_pair.h | 44 + runtime/gc/collector/partial_mark_sweep.cc | 49 + 
runtime/gc/collector/partial_mark_sweep.h | 50 + runtime/gc/collector/semi_space-inl.h | 86 + runtime/gc/collector/semi_space.cc | 668 ++ runtime/gc/collector/semi_space.h | 255 + runtime/gc/collector/sticky_mark_sweep.cc | 82 + runtime/gc/collector/sticky_mark_sweep.h | 64 + runtime/gc/collector_type.h | 79 + runtime/gc/gc_cause.cc | 61 + runtime/gc/gc_cause.h | 75 + runtime/gc/gc_pause_listener.h | 34 + runtime/gc/heap-inl.h | 478 + runtime/gc/heap-visit-objects-inl.h | 169 + runtime/gc/heap.cc | 4321 +++++++ runtime/gc/heap.h | 1631 +++ runtime/gc/heap_test.cc | 115 + runtime/gc/heap_verification_test.cc | 186 + runtime/gc/racing_check.h | 31 + runtime/gc/reference_processor.cc | 368 + runtime/gc/reference_processor.h | 118 + runtime/gc/reference_queue.cc | 204 + runtime/gc/reference_queue.h | 129 + runtime/gc/reference_queue_test.cc | 99 + runtime/gc/scoped_gc_critical_section.cc | 74 + runtime/gc/scoped_gc_critical_section.h | 78 + runtime/gc/space/bump_pointer_space-inl.h | 96 + .../gc/space/bump_pointer_space-walk-inl.h | 100 + runtime/gc/space/bump_pointer_space.cc | 241 + runtime/gc/space/bump_pointer_space.h | 210 + runtime/gc/space/dlmalloc_space-inl.h | 77 + runtime/gc/space/dlmalloc_space.cc | 411 + runtime/gc/space/dlmalloc_space.h | 194 + .../gc/space/dlmalloc_space_random_test.cc | 37 + .../gc/space/dlmalloc_space_static_test.cc | 37 + runtime/gc/space/image_space.cc | 4205 +++++++ runtime/gc/space/image_space.h | 343 + runtime/gc/space/image_space_fs.h | 111 + runtime/gc/space/image_space_loading_order.h | 33 + runtime/gc/space/image_space_test.cc | 475 + runtime/gc/space/large_object_space.cc | 661 ++ runtime/gc/space/large_object_space.h | 248 + runtime/gc/space/large_object_space_test.cc | 187 + runtime/gc/space/malloc_space.cc | 294 + runtime/gc/space/malloc_space.h | 215 + .../gc/space/memory_tool_malloc_space-inl.h | 298 + runtime/gc/space/memory_tool_malloc_space.h | 67 + runtime/gc/space/memory_tool_settings.h | 32 + 
runtime/gc/space/region_space-inl.h | 529 + runtime/gc/space/region_space.cc | 1088 ++ runtime/gc/space/region_space.h | 787 ++ runtime/gc/space/rosalloc_space-inl.h | 81 + runtime/gc/space/rosalloc_space.cc | 498 + runtime/gc/space/rosalloc_space.h | 210 + .../gc/space/rosalloc_space_random_test.cc | 41 + .../gc/space/rosalloc_space_static_test.cc | 40 + runtime/gc/space/space-inl.h | 61 + runtime/gc/space/space.cc | 142 + runtime/gc/space/space.h | 477 + runtime/gc/space/space_create_test.cc | 361 + runtime/gc/space/space_test.h | 393 + runtime/gc/space/zygote_space.cc | 143 + runtime/gc/space/zygote_space.h | 104 + runtime/gc/system_weak.h | 98 + runtime/gc/system_weak_test.cc | 216 + runtime/gc/task_processor.cc | 138 + runtime/gc/task_processor.h | 89 + runtime/gc/task_processor_test.cc | 148 + runtime/gc/verification.cc | 243 + runtime/gc/verification.h | 82 + runtime/gc/weak_root_state.h | 41 + runtime/gc_root-inl.h | 51 + runtime/gc_root.h | 327 + runtime/gtest_test.cc | 20 + runtime/handle.h | 203 + runtime/handle_scope-inl.h | 265 + runtime/handle_scope.h | 281 + runtime/handle_scope_test.cc | 123 + runtime/handle_wrapper.h | 65 + runtime/heap_poisoning.h | 41 + runtime/hidden_api.cc | 566 + runtime/hidden_api.h | 497 + runtime/hidden_api_test.cc | 694 ++ runtime/hprof/hprof.cc | 1615 +++ runtime/hprof/hprof.h | 30 + runtime/image-inl.h | 120 + runtime/image.cc | 202 + runtime/image.h | 590 + runtime/imt_conflict_table.h | 217 + runtime/imtable-inl.h | 104 + runtime/imtable.h | 97 + runtime/imtable_test.cc | 104 + runtime/index_bss_mapping.cc | 72 + runtime/index_bss_mapping.h | 82 + runtime/indirect_reference_table-inl.h | 124 + runtime/indirect_reference_table.cc | 520 + runtime/indirect_reference_table.h | 423 + runtime/indirect_reference_table_test.cc | 517 + runtime/instrumentation.cc | 1653 +++ runtime/instrumentation.h | 798 ++ runtime/instrumentation_test.cc | 989 ++ runtime/intern_table-inl.h | 120 + runtime/intern_table.cc | 484 + 
runtime/intern_table.h | 351 + runtime/intern_table_test.cc | 237 + runtime/interpreter/cfi_asm_support.h | 59 + runtime/interpreter/interpreter.cc | 727 ++ runtime/interpreter/interpreter.h | 82 + runtime/interpreter/interpreter_cache.cc | 32 + runtime/interpreter/interpreter_cache.h | 99 + runtime/interpreter/interpreter_common.cc | 1602 +++ runtime/interpreter/interpreter_common.h | 1050 ++ runtime/interpreter/interpreter_intrinsics.cc | 632 ++ runtime/interpreter/interpreter_intrinsics.h | 41 + runtime/interpreter/interpreter_mterp_impl.h | 45 + .../interpreter/interpreter_switch_impl-inl.h | 2010 ++++ runtime/interpreter/interpreter_switch_impl.h | 80 + .../interpreter/interpreter_switch_impl0.cc | 30 + .../interpreter/interpreter_switch_impl1.cc | 30 + .../interpreter/interpreter_switch_impl2.cc | 30 + .../interpreter/interpreter_switch_impl3.cc | 30 + runtime/interpreter/lock_count_data.cc | 111 + runtime/interpreter/lock_count_data.h | 74 + runtime/interpreter/mterp/README.txt | 81 + runtime/interpreter/mterp/arm/arithmetic.S | 975 ++ runtime/interpreter/mterp/arm/array.S | 250 + runtime/interpreter/mterp/arm/control_flow.S | 209 + .../interpreter/mterp/arm/floating_point.S | 482 + runtime/interpreter/mterp/arm/invoke.S | 121 + runtime/interpreter/mterp/arm/main.S | 775 ++ runtime/interpreter/mterp/arm/object.S | 322 + runtime/interpreter/mterp/arm/other.S | 385 + runtime/interpreter/mterp/arm64/arithmetic.S | 507 + runtime/interpreter/mterp/arm64/array.S | 235 + .../interpreter/mterp/arm64/control_flow.S | 223 + .../interpreter/mterp/arm64/floating_point.S | 318 + runtime/interpreter/mterp/arm64/invoke.S | 110 + runtime/interpreter/mterp/arm64/main.S | 797 ++ runtime/interpreter/mterp/arm64/object.S | 308 + runtime/interpreter/mterp/arm64/other.S | 359 + runtime/interpreter/mterp/common/gen_setup.py | 90 + runtime/interpreter/mterp/gen_mterp.py | 98 + runtime/interpreter/mterp/mterp.cc | 963 ++ runtime/interpreter/mterp/mterp.h | 65 + 
runtime/interpreter/mterp/mterp_stub.cc | 52 + runtime/interpreter/mterp/nterp.cc | 606 + runtime/interpreter/mterp/nterp_stub.cc | 56 + runtime/interpreter/mterp/x86/arithmetic.S | 943 ++ runtime/interpreter/mterp/x86/array.S | 215 + runtime/interpreter/mterp/x86/control_flow.S | 219 + .../interpreter/mterp/x86/floating_point.S | 250 + runtime/interpreter/mterp/x86/invoke.S | 121 + runtime/interpreter/mterp/x86/main.S | 804 ++ runtime/interpreter/mterp/x86/object.S | 278 + runtime/interpreter/mterp/x86/other.S | 328 + runtime/interpreter/mterp/x86_64/arithmetic.S | 625 ++ runtime/interpreter/mterp/x86_64/array.S | 178 + .../interpreter/mterp/x86_64/control_flow.S | 206 + .../interpreter/mterp/x86_64/floating_point.S | 250 + runtime/interpreter/mterp/x86_64/invoke.S | 115 + runtime/interpreter/mterp/x86_64/main.S | 759 ++ runtime/interpreter/mterp/x86_64/object.S | 254 + runtime/interpreter/mterp/x86_64/other.S | 297 + runtime/interpreter/mterp/x86_64ng/array.S | 151 + .../interpreter/mterp/x86_64ng/control_flow.S | 179 + runtime/interpreter/mterp/x86_64ng/invoke.S | 181 + runtime/interpreter/mterp/x86_64ng/main.S | 2018 ++++ runtime/interpreter/mterp/x86_64ng/object.S | 204 + runtime/interpreter/mterp/x86_64ng/other.S | 273 + runtime/interpreter/safe_math.h | 65 + runtime/interpreter/safe_math_test.cc | 132 + runtime/interpreter/shadow_frame-inl.h | 41 + runtime/interpreter/shadow_frame.cc | 46 + runtime/interpreter/shadow_frame.h | 465 + runtime/interpreter/unstarted_runtime.cc | 2091 ++++ runtime/interpreter/unstarted_runtime.h | 112 + runtime/interpreter/unstarted_runtime_list.h | 110 + runtime/interpreter/unstarted_runtime_test.cc | 1385 +++ runtime/intrinsics_enum.h | 35 + runtime/intrinsics_list.h | 246 + runtime/java_frame_root_info.cc | 41 + runtime/java_frame_root_info.h | 67 + runtime/jdwp_provider.h | 57 + runtime/jit/TEST_MAPPING | 8 + runtime/jit/debugger_interface.cc | 605 + runtime/jit/debugger_interface.h | 73 + runtime/jit/jit-inl.h | 67 + 
runtime/jit/jit.cc | 1865 ++++ runtime/jit/jit.h | 527 + runtime/jit/jit_code_cache.cc | 1971 ++++ runtime/jit/jit_code_cache.h | 576 + runtime/jit/jit_memory_region.cc | 600 + runtime/jit/jit_memory_region.h | 288 + runtime/jit/jit_memory_region_test.cc | 516 + runtime/jit/jit_scoped_code_cache_write.h | 67 + runtime/jit/profile_saver.cc | 1003 ++ runtime/jit/profile_saver.h | 166 + runtime/jit/profile_saver_options.h | 155 + runtime/jit/profile_saver_test.cc | 110 + runtime/jit/profiling_info.cc | 113 + runtime/jit/profiling_info.h | 181 + runtime/jit/profiling_info_test.cc | 300 + runtime/jni/check_jni.cc | 4087 +++++++ runtime/jni/check_jni.h | 29 + runtime/jni/java_vm_ext.cc | 1246 +++ runtime/jni/java_vm_ext.h | 271 + runtime/jni/java_vm_ext_test.cc | 184 + runtime/jni/jni_env_ext-inl.h | 54 + runtime/jni/jni_env_ext.cc | 334 + runtime/jni/jni_env_ext.h | 243 + runtime/jni/jni_id_manager.cc | 690 ++ runtime/jni/jni_id_manager.h | 128 + runtime/jni/jni_internal.cc | 3364 ++++++ runtime/jni/jni_internal.h | 149 + runtime/jni/jni_internal_test.cc | 2656 +++++ runtime/jni_id_type.h | 41 + runtime/jvalue-inl.h | 50 + runtime/jvalue.h | 94 + runtime/linear_alloc.cc | 60 + runtime/linear_alloc.h | 65 + runtime/lock_word-inl.h | 71 + runtime/lock_word.h | 324 + runtime/managed_stack-inl.h | 44 + runtime/managed_stack.cc | 57 + runtime/managed_stack.h | 174 + runtime/mapping_table.h | 206 + runtime/method_handles-inl.h | 242 + runtime/method_handles.cc | 1303 +++ runtime/method_handles.h | 148 + runtime/method_handles_test.cc | 381 + runtime/mirror/accessible_object.h | 50 + runtime/mirror/array-alloc-inl.h | 185 + runtime/mirror/array-inl.h | 320 + runtime/mirror/array.cc | 172 + runtime/mirror/array.h | 272 + runtime/mirror/call_site-inl.h | 34 + runtime/mirror/call_site.h | 48 + runtime/mirror/class-alloc-inl.h | 88 + runtime/mirror/class-inl.h | 1222 ++ runtime/mirror/class-refvisitor-inl.h | 97 + runtime/mirror/class.cc | 1822 +++ runtime/mirror/class.h | 1576 
+++ runtime/mirror/class_ext-inl.h | 233 + runtime/mirror/class_ext.cc | 147 + runtime/mirror/class_ext.h | 193 + runtime/mirror/class_flags.h | 71 + runtime/mirror/class_loader-inl.h | 51 + runtime/mirror/class_loader.h | 94 + runtime/mirror/dex_cache-inl.h | 450 + runtime/mirror/dex_cache.cc | 307 + runtime/mirror/dex_cache.h | 599 + runtime/mirror/dex_cache_test.cc | 183 + runtime/mirror/emulated_stack_frame-inl.h | 50 + runtime/mirror/emulated_stack_frame.cc | 279 + runtime/mirror/emulated_stack_frame.h | 97 + runtime/mirror/executable-inl.h | 57 + runtime/mirror/executable.cc | 43 + runtime/mirror/executable.h | 103 + runtime/mirror/field-inl.h | 115 + runtime/mirror/field.cc | 58 + runtime/mirror/field.h | 124 + runtime/mirror/iftable-inl.h | 76 + runtime/mirror/iftable.h | 69 + runtime/mirror/method.cc | 68 + runtime/mirror/method.h | 55 + runtime/mirror/method_handle_impl-inl.h | 39 + runtime/mirror/method_handle_impl.cc | 73 + runtime/mirror/method_handle_impl.h | 148 + runtime/mirror/method_handles_lookup.cc | 67 + runtime/mirror/method_handles_lookup.h | 71 + runtime/mirror/method_type-inl.h | 42 + runtime/mirror/method_type.cc | 175 + runtime/mirror/method_type.h | 106 + runtime/mirror/method_type_test.cc | 109 + runtime/mirror/object-inl.h | 954 ++ runtime/mirror/object-readbarrier-inl.h | 197 + runtime/mirror/object-refvisitor-inl.h | 96 + runtime/mirror/object.cc | 299 + runtime/mirror/object.h | 795 ++ runtime/mirror/object_array-alloc-inl.h | 82 + runtime/mirror/object_array-inl.h | 380 + runtime/mirror/object_array.h | 218 + runtime/mirror/object_reference-inl.h | 55 + runtime/mirror/object_reference.h | 183 + runtime/mirror/object_test.cc | 872 ++ runtime/mirror/proxy.h | 40 + runtime/mirror/reference-inl.h | 54 + runtime/mirror/reference.h | 143 + runtime/mirror/stack_trace_element-inl.h | 42 + runtime/mirror/stack_trace_element.cc | 63 + runtime/mirror/stack_trace_element.h | 70 + runtime/mirror/string-alloc-inl.h | 257 + 
runtime/mirror/string-inl.h | 108 + runtime/mirror/string.cc | 375 + runtime/mirror/string.h | 286 + runtime/mirror/throwable.cc | 175 + runtime/mirror/throwable.h | 68 + runtime/mirror/var_handle.cc | 2047 ++++ runtime/mirror/var_handle.h | 300 + runtime/mirror/var_handle_test.cc | 1108 ++ runtime/module_exclusion_test.cc | 193 + runtime/monitor-inl.h | 85 + runtime/monitor.cc | 1737 +++ runtime/monitor.h | 480 + runtime/monitor_android.cc | 108 + runtime/monitor_linux.cc | 24 + runtime/monitor_objects_stack_visitor.cc | 103 + runtime/monitor_objects_stack_visitor.h | 87 + runtime/monitor_pool.cc | 159 + runtime/monitor_pool.h | 250 + runtime/monitor_pool_test.cc | 127 + runtime/monitor_test.cc | 379 + .../dalvik_system_BaseDexClassLoader.cc | 70 + .../native/dalvik_system_BaseDexClassLoader.h | 29 + runtime/native/dalvik_system_DexFile.cc | 963 ++ runtime/native/dalvik_system_DexFile.h | 34 + runtime/native/dalvik_system_VMDebug.cc | 673 ++ runtime/native/dalvik_system_VMDebug.h | 28 + runtime/native/dalvik_system_VMRuntime.cc | 837 ++ runtime/native/dalvik_system_VMRuntime.h | 28 + runtime/native/dalvik_system_VMStack.cc | 185 + runtime/native/dalvik_system_VMStack.h | 28 + runtime/native/dalvik_system_ZygoteHooks.cc | 450 + runtime/native/dalvik_system_ZygoteHooks.h | 28 + runtime/native/java_lang_Class.cc | 1020 ++ runtime/native/java_lang_Class.h | 28 + runtime/native/java_lang_Object.cc | 69 + runtime/native/java_lang_Object.h | 28 + runtime/native/java_lang_String.cc | 128 + runtime/native/java_lang_String.h | 28 + runtime/native/java_lang_StringFactory.cc | 102 + runtime/native/java_lang_StringFactory.h | 28 + runtime/native/java_lang_System.cc | 258 + runtime/native/java_lang_System.h | 28 + runtime/native/java_lang_Thread.cc | 223 + runtime/native/java_lang_Thread.h | 28 + runtime/native/java_lang_Throwable.cc | 50 + runtime/native/java_lang_Throwable.h | 28 + runtime/native/java_lang_VMClassLoader.cc | 167 + runtime/native/java_lang_VMClassLoader.h | 28 
+ .../java_lang_invoke_MethodHandleImpl.cc | 79 + .../java_lang_invoke_MethodHandleImpl.h | 28 + .../java_lang_ref_FinalizerReference.cc | 54 + .../native/java_lang_ref_FinalizerReference.h | 28 + runtime/native/java_lang_ref_Reference.cc | 54 + runtime/native/java_lang_ref_Reference.h | 28 + runtime/native/java_lang_reflect_Array.cc | 86 + runtime/native/java_lang_reflect_Array.h | 28 + .../native/java_lang_reflect_Constructor.cc | 141 + .../native/java_lang_reflect_Constructor.h | 28 + .../native/java_lang_reflect_Executable.cc | 408 + runtime/native/java_lang_reflect_Executable.h | 28 + runtime/native/java_lang_reflect_Field.cc | 542 + runtime/native/java_lang_reflect_Field.h | 28 + runtime/native/java_lang_reflect_Method.cc | 99 + runtime/native/java_lang_reflect_Method.h | 28 + runtime/native/java_lang_reflect_Parameter.cc | 112 + runtime/native/java_lang_reflect_Parameter.h | 28 + runtime/native/java_lang_reflect_Proxy.cc | 48 + runtime/native/java_lang_reflect_Proxy.h | 28 + .../java_util_concurrent_atomic_AtomicLong.cc | 41 + .../java_util_concurrent_atomic_AtomicLong.h | 28 + runtime/native/libcore_util_CharsetUtils.cc | 267 + runtime/native/libcore_util_CharsetUtils.h | 28 + runtime/native/native_util.h | 45 + ...rg_apache_harmony_dalvik_ddmc_DdmServer.cc | 49 + ...org_apache_harmony_dalvik_ddmc_DdmServer.h | 28 + ...pache_harmony_dalvik_ddmc_DdmVmInternal.cc | 245 + ...apache_harmony_dalvik_ddmc_DdmVmInternal.h | 28 + .../scoped_fast_native_object_access-inl.h | 37 + .../native/scoped_fast_native_object_access.h | 42 + runtime/native/sun_misc_Unsafe.cc | 616 + runtime/native/sun_misc_Unsafe.h | 28 + runtime/native_bridge_art_interface.cc | 150 + runtime/native_bridge_art_interface.h | 42 + runtime/native_stack_dump.cc | 434 + runtime/native_stack_dump.h | 44 + runtime/non_debuggable_classes.cc | 42 + runtime/non_debuggable_classes.h | 42 + runtime/noop_compiler_callbacks.h | 42 + runtime/nterp_helpers.cc | 160 + runtime/nterp_helpers.h | 79 + 
runtime/nth_caller_visitor.h | 67 + runtime/oat.cc | 432 + runtime/oat.h | 143 + runtime/oat_file-inl.h | 119 + runtime/oat_file.cc | 2268 ++++ runtime/oat_file.h | 600 + runtime/oat_file_assistant.cc | 1044 ++ runtime/oat_file_assistant.h | 454 + runtime/oat_file_assistant_test.cc | 1569 +++ runtime/oat_file_manager.cc | 1104 ++ runtime/oat_file_manager.h | 201 + runtime/oat_file_test.cc | 96 + runtime/oat_quick_method_header.cc | 102 + runtime/oat_quick_method_header.h | 186 + runtime/obj_ptr-inl.h | 187 + runtime/obj_ptr.h | 194 + runtime/object_callbacks.h | 49 + runtime/object_lock.cc | 70 + runtime/object_lock.h | 71 + runtime/offsets.cc | 27 + runtime/offsets.h | 75 + runtime/parsed_options.cc | 854 ++ runtime/parsed_options.h | 84 + runtime/parsed_options_test.cc | 184 + runtime/plugin.cc | 90 + runtime/plugin.h | 83 + runtime/prebuilt_tools_test.cc | 66 + runtime/process_state.h | 33 + runtime/proxy_test.cc | 194 + runtime/proxy_test.h | 123 + runtime/quick/quick_method_frame_info.h | 65 + runtime/quick_exception_handler.cc | 695 ++ runtime/quick_exception_handler.h | 171 + runtime/quicken_info.h | 85 + runtime/read_barrier-inl.h | 268 + runtime/read_barrier.cc | 24 + runtime/read_barrier.h | 136 + runtime/read_barrier_config.h | 97 + runtime/read_barrier_option.h | 65 + runtime/reference_table.cc | 363 + runtime/reference_table.h | 72 + runtime/reference_table_test.cc | 338 + runtime/reflection-inl.h | 132 + runtime/reflection.cc | 1058 ++ runtime/reflection.h | 145 + runtime/reflection_test.cc | 637 ++ runtime/reflective_handle.h | 126 + runtime/reflective_handle_scope-inl.h | 71 + runtime/reflective_handle_scope.cc | 35 + runtime/reflective_handle_scope.h | 207 + runtime/reflective_reference.h | 71 + runtime/reflective_value_visitor.cc | 54 + runtime/reflective_value_visitor.h | 182 + runtime/runtime-inl.h | 103 + runtime/runtime.cc | 3068 +++++ runtime/runtime.h | 1357 +++ runtime/runtime_android.cc | 51 + runtime/runtime_callbacks.cc | 351 + 
runtime/runtime_callbacks.h | 310 + runtime/runtime_callbacks_test.cc | 521 + runtime/runtime_common.cc | 551 + runtime/runtime_common.h | 84 + runtime/runtime_globals.h | 90 + runtime/runtime_intrinsics.cc | 84 + runtime/runtime_intrinsics.h | 26 + runtime/runtime_linux.cc | 83 + runtime/runtime_options.cc | 37 + runtime/runtime_options.def | 189 + runtime/runtime_options.h | 83 + runtime/runtime_stats.h | 114 + runtime/runtime_test.cc | 78 + runtime/scoped_thread_state_change-inl.h | 133 + runtime/scoped_thread_state_change.cc | 50 + runtime/scoped_thread_state_change.h | 213 + runtime/signal_catcher.cc | 213 + runtime/signal_catcher.h | 62 + runtime/signal_set.h | 62 + runtime/stack.cc | 1009 ++ runtime/stack.h | 381 + runtime/stack_map.cc | 358 + runtime/stack_map.h | 511 + runtime/stack_reference.h | 32 + runtime/string_builder_append.cc | 364 + runtime/string_builder_append.h | 67 + runtime/subtype_check.h | 597 + runtime/subtype_check_bits.h | 66 + runtime/subtype_check_bits_and_status.h | 85 + runtime/subtype_check_info.h | 523 + runtime/subtype_check_info_test.cc | 423 + runtime/subtype_check_test.cc | 1065 ++ runtime/suspend_reason.h | 37 + runtime/thread-current-inl.h | 47 + runtime/thread-inl.h | 421 + runtime/thread.cc | 4368 ++++++++ runtime/thread.h | 2035 ++++ runtime/thread_android.cc | 29 + runtime/thread_linux.cc | 73 + runtime/thread_list.cc | 1450 +++ runtime/thread_list.h | 259 + runtime/thread_pool.cc | 304 + runtime/thread_pool.h | 212 + runtime/thread_pool_test.cc | 214 + runtime/thread_state.h | 64 + runtime/ti/agent.cc | 239 + runtime/ti/agent.h | 146 + runtime/trace.cc | 1059 ++ runtime/trace.h | 391 + runtime/transaction.cc | 735 ++ runtime/transaction.h | 350 + runtime/transaction_test.cc | 740 ++ runtime/two_runtimes_test.cc | 41 + runtime/utils/dex_cache_arrays_layout-inl.h | 184 + runtime/utils/dex_cache_arrays_layout.h | 138 + runtime/var_handles.cc | 98 + runtime/var_handles.h | 35 + runtime/vdex_file.cc | 509 + 
runtime/vdex_file.h | 399 + runtime/vdex_file_test.cc | 48 + runtime/verifier/class_verifier.cc | 340 + runtime/verifier/class_verifier.h | 121 + runtime/verifier/instruction_flags.cc | 42 + runtime/verifier/instruction_flags.h | 134 + runtime/verifier/method_verifier-inl.h | 48 + runtime/verifier/method_verifier.cc | 5622 ++++++++++ runtime/verifier/method_verifier.h | 402 + runtime/verifier/method_verifier_test.cc | 75 + runtime/verifier/reg_type-inl.h | 218 + runtime/verifier/reg_type.cc | 1099 ++ runtime/verifier/reg_type.h | 1167 ++ runtime/verifier/reg_type_cache-inl.h | 197 + runtime/verifier/reg_type_cache.cc | 731 ++ runtime/verifier/reg_type_cache.h | 228 + runtime/verifier/reg_type_test.cc | 1150 ++ runtime/verifier/register_line-inl.h | 240 + runtime/verifier/register_line.cc | 546 + runtime/verifier/register_line.h | 456 + runtime/verifier/scoped_newline.h | 64 + runtime/verifier/verifier_compiler_binding.h | 40 + runtime/verifier/verifier_deps.cc | 1240 +++ runtime/verifier/verifier_deps.h | 413 + runtime/verifier/verifier_enums.h | 109 + runtime/verify_object-inl.h | 38 + runtime/verify_object.cc | 47 + runtime/verify_object.h | 75 + runtime/well_known_classes.cc | 636 ++ runtime/well_known_classes.h | 190 + runtime/write_barrier-inl.h | 56 + runtime/write_barrier.h | 62 + sigchainlib/Android.bp | 97 + sigchainlib/OWNERS | 4 + sigchainlib/log.h | 45 + sigchainlib/sigchain.cc | 539 + sigchainlib/sigchain.h | 42 + sigchainlib/sigchain_dummy.cc | 56 + sigchainlib/sigchain_test.cc | 240 + sigchainlib/version-script32.txt | 15 + sigchainlib/version-script64.txt | 14 + simulator/Android.bp | 103 + simulator/code_simulator.cc | 36 + simulator/code_simulator_arm64.cc | 73 + simulator/code_simulator_arm64.h | 60 + simulator/code_simulator_container.cc | 58 + simulator/code_simulator_container.h | 57 + simulator/include/code_simulator.h | 46 + test.py | 70 + test/000-nop/build | 3 + test/000-nop/expected.txt | 1 + test/000-nop/info.txt | 2 + test/000-nop/run | 
3 + test/001-HelloWorld/expected.txt | 1 + test/001-HelloWorld/info.txt | 1 + test/001-HelloWorld/src/Main.java | 21 + test/001-Main/expected.txt | 0 test/001-Main/info.txt | 1 + test/001-Main/src/Main.java | 20 + test/002-sleep/expected.txt | 2 + test/002-sleep/info.txt | 3 + test/002-sleep/src/Main.java | 22 + test/003-omnibus-opcodes/build | 36 + test/003-omnibus-opcodes/expected.txt | 68 + test/003-omnibus-opcodes/info.txt | 1 + test/003-omnibus-opcodes/src/Array.java | 237 + test/003-omnibus-opcodes/src/Classes.java | 233 + test/003-omnibus-opcodes/src/Compare.java | 183 + test/003-omnibus-opcodes/src/FloatMath.java | 387 + test/003-omnibus-opcodes/src/GenSelect.java | 28 + test/003-omnibus-opcodes/src/Goto.java | 2408 ++++ test/003-omnibus-opcodes/src/InstField.java | 108 + test/003-omnibus-opcodes/src/IntMath.java | 615 + .../src/InternedString.java | 59 + test/003-omnibus-opcodes/src/Main.java | 81 + test/003-omnibus-opcodes/src/MethodCall.java | 82 + test/003-omnibus-opcodes/src/Monitor.java | 57 + test/003-omnibus-opcodes/src/StaticField.java | 76 + test/003-omnibus-opcodes/src/Switch.java | 62 + test/003-omnibus-opcodes/src/Throw.java | 124 + test/003-omnibus-opcodes/src/UnresClass.java | 9 + .../src/UnresClassSubclass.java | 4 + test/003-omnibus-opcodes/src/UnresStuff.java | 22 + test/003-omnibus-opcodes/src/UnresTest1.java | 80 + test/003-omnibus-opcodes/src/UnresTest2.java | 59 + test/003-omnibus-opcodes/src2/UnresStuff.java | 9 + test/004-InterfaceTest/expected.txt | 2 + test/004-InterfaceTest/info.txt | 1 + test/004-InterfaceTest/src/Main.java | 53 + test/004-JniTest/build | 47 + test/004-JniTest/expected.txt | 64 + test/004-JniTest/info.txt | 1 + test/004-JniTest/jni_test.cc | 815 ++ .../004-JniTest/smali/AbstractInterface.smali | 26 + test/004-JniTest/smali/ConcreteClass.smali | 72 + .../004-JniTest/smali/ConflictInterface.smali | 35 + test/004-JniTest/smali/DefaultInterface.smali | 77 + test/004-JniTest/src-ex/A.java | 21 + 
test/004-JniTest/src/Main.java | 465 + .../optimization/CriticalNative.java | 31 + .../annotation/optimization/FastNative.java | 31 + test/004-NativeAllocations/expected.txt | 1 + test/004-NativeAllocations/info.txt | 1 + test/004-NativeAllocations/src-art/Main.java | 127 + test/004-ReferenceMap/build | 42 + test/004-ReferenceMap/classes.dex | Bin 0 -> 1108 bytes test/004-ReferenceMap/expected.txt | 1 + test/004-ReferenceMap/info.txt | 1 + test/004-ReferenceMap/src/Main.java | 44 + .../004-ReferenceMap/stack_walk_refmap_jni.cc | 211 + test/004-SignalTest/expected.txt | 10 + test/004-SignalTest/info.txt | 1 + test/004-SignalTest/signaltest.cc | 169 + test/004-SignalTest/src/Main.java | 58 + test/004-StackWalk/build | 39 + test/004-StackWalk/classes.dex | Bin 0 -> 3844 bytes test/004-StackWalk/expected.txt | 5 + test/004-StackWalk/info.txt | 1 + test/004-StackWalk/src/Main.java | 115 + test/004-StackWalk/stack_walk_jni.cc | 106 + test/004-ThreadStress/check | 23 + test/004-ThreadStress/expected.txt | 22 + test/004-ThreadStress/info.txt | 1 + test/004-ThreadStress/run | 28 + test/004-ThreadStress/src-art/Main.java | 877 ++ test/004-ThreadStress/thread_stress.cc | 36 + test/004-UnsafeTest/expected.txt | 1 + test/004-UnsafeTest/info.txt | 1 + test/004-UnsafeTest/src/Main.java | 303 + test/004-UnsafeTest/unsafe_test.cc | 53 + test/004-checker-UnsafeTest18/expected.txt | 2 + test/004-checker-UnsafeTest18/info.txt | 1 + test/004-checker-UnsafeTest18/src/Main.java | 377 + test/005-annotations/build | 46 + test/005-annotations/expected.txt | 112 + test/005-annotations/info.txt | 1 + test/005-annotations/src/Main.java | 7 + .../src/android/test/AnnoSimplePackage1.java | 8 + .../src/android/test/anno/AnnoArrayField.java | 19 + .../test/anno/AnnoFancyConstructor.java | 10 + .../src/android/test/anno/AnnoFancyField.java | 12 + .../android/test/anno/AnnoFancyMethod.java | 14 + .../android/test/anno/AnnoFancyParameter.java | 10 + .../src/android/test/anno/AnnoFancyType.java | 11 + 
.../android/test/anno/AnnoMissingClass.java | 24 + .../test/anno/AnnoRenamedEnumMethod.java | 26 + .../test/anno/AnnoSimpleConstructor.java | 8 + .../android/test/anno/AnnoSimpleField.java | 8 + .../test/anno/AnnoSimpleLocalVariable.java | 8 + .../android/test/anno/AnnoSimpleMethod.java | 8 + .../android/test/anno/AnnoSimplePackage.java | 8 + .../test/anno/AnnoSimpleParameter.java | 8 + .../src/android/test/anno/AnnoSimpleType.java | 9 + .../android/test/anno/AnnoSimpleType2.java | 8 + .../test/anno/AnnoSimpleTypeInvis.java | 8 + .../anno/ClassWithInnerAnnotationClass.java | 8 + .../test/anno/ClassWithInnerClasses.java | 35 + .../test/anno/ClassWithMissingAnnotation.java | 22 + .../android/test/anno/ExportedProperty.java | 12 + .../src/android/test/anno/FullyNoted.java | 39 + .../src/android/test/anno/INoted.java | 7 + .../src/android/test/anno/IntToString.java | 12 + .../android/test/anno/MissingAnnotation.java | 8 + .../android/test/anno/RenamedEnumClass.java | 26 + .../src/android/test/anno/RenamedNoted.java | 24 + .../src/android/test/anno/SimplyNoted.java | 36 + .../src/android/test/anno/SomeClass.java | 4 + .../src/android/test/anno/SubNoted.java | 12 + .../android/test/anno/TestAnnotations.java | 299 + .../src/android/test/anno/package-info.java | 2 + .../src/android/test/package-info.java | 2 + .../android/test/anno/RenamedEnumClass.java | 26 + test/006-args/expected.txt | 5 + test/006-args/info.txt | 6 + test/006-args/src/ArgsTest.java | 44 + test/006-args/src/Main.java | 28 + test/007-count10/expected.txt | 10 + test/007-count10/info.txt | 6 + test/007-count10/src/Main.java | 29 + test/008-exceptions/expected.txt | 25 + test/008-exceptions/info.txt | 6 + .../src-multidex/MultiDexBadInitWrapper2.java | 24 + test/008-exceptions/src/Main.java | 215 + test/008-exceptions/src/MultiDexBadInit.java | 25 + .../src/MultiDexBadInitWrapper1.java | 24 + test/009-instanceof/expected.txt | 11 + test/009-instanceof/info.txt | 6 + test/009-instanceof/src/Iface1.java | 27 
+ test/009-instanceof/src/Iface2.java | 23 + test/009-instanceof/src/Iface2Sub1.java | 23 + test/009-instanceof/src/ImplA.java | 28 + test/009-instanceof/src/ImplB.java | 30 + test/009-instanceof/src/ImplBSub.java | 28 + test/009-instanceof/src/Main.java | 70 + test/010-instance/expected.txt | 30 + test/010-instance/info.txt | 6 + test/010-instance/src/InstanceTest.java | 107 + test/010-instance/src/Main.java | 24 + test/010-instance/src/X.java | 8 + test/010-instance/src/Y.java | 8 + test/011-array-copy/expected.txt | 15 + test/011-array-copy/info.txt | 6 + test/011-array-copy/src/Iface1.java | 27 + test/011-array-copy/src/Iface2.java | 23 + test/011-array-copy/src/ImplA.java | 28 + test/011-array-copy/src/Main.java | 168 + test/012-math/expected.txt | 40 + test/012-math/info.txt | 6 + test/012-math/src/Main.java | 125 + test/013-math2/expected.txt | 2 + test/013-math2/info.txt | 6 + test/013-math2/src/Main.java | 36 + test/014-math3/expected.txt | 1 + test/014-math3/info.txt | 6 + test/014-math3/src/Main.java | 60 + test/015-switch/expected.txt | 132 + test/015-switch/info.txt | 6 + test/015-switch/src/Main.java | 553 + test/016-intern/expected.txt | 3 + test/016-intern/info.txt | 6 + test/016-intern/src/Main.java | 52 + test/017-float/expected.txt | 3 + test/017-float/info.txt | 6 + test/017-float/src/Main.java | 36 + test/018-stack-overflow/expected.txt | 4 + test/018-stack-overflow/info.txt | 6 + test/018-stack-overflow/src/Main.java | 86 + test/019-wrong-array-type/expected.txt | 1 + test/019-wrong-array-type/info.txt | 6 + test/019-wrong-array-type/src/Main.java | 33 + test/020-string/expected.txt | 13 + test/020-string/info.txt | 6 + test/020-string/src/Main.java | 172 + test/021-string2/expected.txt | 6 + test/021-string2/info.txt | 6 + test/021-string2/src/Main.java | 890 ++ .../src/junit/framework/Assert.java | 296 + .../junit/framework/AssertionFailedError.java | 20 + .../junit/framework/ComparisonCompactor.java | 87 + 
.../junit/framework/ComparisonFailure.java | 52 + test/022-interface/classes/Iface1.class | Bin 0 -> 389 bytes test/022-interface/classes/Iface2.class | Bin 0 -> 117 bytes test/022-interface/classes/Iface2Sub1.class | Bin 0 -> 142 bytes test/022-interface/classes/ImplA.class | Bin 0 -> 321 bytes test/022-interface/classes/ImplB.class | Bin 0 -> 506 bytes test/022-interface/classes/ImplBSub.class | Bin 0 -> 309 bytes test/022-interface/classes/Main$1.class | Bin 0 -> 757 bytes .../classes/Main$SubInterface.class | Bin 0 -> 221 bytes .../classes/Main$SubInterfaceImpl.class | Bin 0 -> 606 bytes test/022-interface/classes/Main.class | Bin 0 -> 1599 bytes .../classes/ObjectOverridingInterface.class | Bin 0 -> 269 bytes .../SubObjectOverridingInterface.class | Bin 0 -> 170 bytes test/022-interface/expected.txt | 3 + test/022-interface/info.txt | 6 + test/022-interface/src/Iface1.java | 27 + test/022-interface/src/Iface2.java | 23 + test/022-interface/src/Iface2Sub1.java | 23 + test/022-interface/src/ImplA.java | 28 + test/022-interface/src/ImplB.java | 30 + test/022-interface/src/ImplBSub.java | 28 + test/022-interface/src/Main.java | 97 + .../src/ObjectOverridingInterface.java | 24 + .../src/SubObjectOverridingInterface.java | 18 + test/023-many-interfaces/build | 24 + test/023-many-interfaces/expected.txt | 9 + test/023-many-interfaces/iface-gen.c | 54 + test/023-many-interfaces/info.txt | 6 + test/023-many-interfaces/src/Main.java | 6 + .../src/ManyInterfaces.java | 427 + test/024-illegal-access/expected.txt | 5 + test/024-illegal-access/info.txt | 3 + .../src/CheckInstanceof.java | 27 + test/024-illegal-access/src/Main.java | 68 + test/024-illegal-access/src/PublicAccess.java | 37 + test/024-illegal-access/src/SemiPrivate.java | 32 + .../src/otherpkg/Package.java | 23 + test/024-illegal-access/src2/SemiPrivate.java | 32 + .../src2/otherpkg/Package.java | 23 + test/025-access-controller/expected.txt | 1 + test/025-access-controller/info.txt | 6 + 
test/025-access-controller/src/Main.java | 28 + test/025-access-controller/src/Privvy.java | 32 + test/026-access/expected.txt | 6 + test/026-access/info.txt | 6 + test/026-access/src/Iface.java | 22 + test/026-access/src/Iface2.java | 25 + test/026-access/src/Main.java | 31 + test/026-access/src/Unrelated.java | 24 + .../src/otherpackage/PublicAccess.java | 7 + test/027-arithmetic/expected.txt | 18 + test/027-arithmetic/info.txt | 6 + test/027-arithmetic/src/Main.java | 155 + test/028-array-write/expected.txt | 3 + test/028-array-write/info.txt | 6 + test/028-array-write/src/Main.java | 85 + test/029-assert/expected.txt | 1 + test/029-assert/info.txt | 6 + test/029-assert/src/Main.java | 33 + test/030-bad-finalizer/expected.txt | 5 + test/030-bad-finalizer/info.txt | 15 + test/030-bad-finalizer/run | 22 + test/030-bad-finalizer/src/Main.java | 110 + test/031-class-attributes/expected.txt | 233 + test/031-class-attributes/info.txt | 6 + .../jasmin/ClassAttrs$1.j | 49 + test/031-class-attributes/src/ClassAttrs.java | 361 + test/031-class-attributes/src/FancyClass.java | 22 + test/031-class-attributes/src/Main.java | 5 + test/031-class-attributes/src/OtherClass.java | 2 + .../src/otherpackage/OtherPackageClass.java | 4 + test/032-concrete-sub/expected.txt | 6 + test/032-concrete-sub/info.txt | 3 + test/032-concrete-sub/src/AbstractBase.java | 26 + test/032-concrete-sub/src/ConcreteSub.java | 53 + test/032-concrete-sub/src/ConcreteSub2.java | 26 + test/032-concrete-sub/src/Main.java | 36 + test/032-concrete-sub/src2/AbstractBase.java | 29 + test/033-class-init-deadlock/expected.txt | 5 + test/033-class-init-deadlock/info.txt | 6 + test/033-class-init-deadlock/src/Main.java | 71 + test/034-call-null/expected.txt | 3 + test/034-call-null/info.txt | 6 + test/034-call-null/run | 19 + test/034-call-null/src/Main.java | 28 + test/035-enum/expected.txt | 3 + test/035-enum/info.txt | 6 + test/035-enum/src/Main.java | 37 + test/036-finalizer/expected.txt | 14 + 
test/036-finalizer/info.txt | 6 + test/036-finalizer/src/Main.java | 194 + test/037-inherit/expected.txt | 3 + test/037-inherit/info.txt | 6 + test/037-inherit/src/Main.java | 37 + test/038-inner-null/expected.txt | 5 + test/038-inner-null/info.txt | 6 + test/038-inner-null/run | 19 + test/038-inner-null/src/Main.java | 41 + test/039-join-main/expected.txt | 5 + test/039-join-main/info.txt | 6 + test/039-join-main/src/Main.java | 68 + test/040-miranda/expected.txt | 14 + test/040-miranda/info.txt | 6 + test/040-miranda/src/Main.java | 54 + test/040-miranda/src/MirandaAbstract.java | 32 + test/040-miranda/src/MirandaClass.java | 35 + test/040-miranda/src/MirandaClass2.java | 25 + test/040-miranda/src/MirandaInterface.java | 24 + test/040-miranda/src/MirandaInterface2.java | 26 + test/041-narrowing/expected.txt | 38 + test/041-narrowing/info.txt | 6 + test/041-narrowing/src/Main.java | 99 + test/042-new-instance/expected.txt | 12 + test/042-new-instance/info.txt | 2 + test/042-new-instance/src/Main.java | 208 + test/042-new-instance/src/MaybeAbstract.java | 20 + .../src/otherpackage/ConstructorAccess.java | 36 + .../src/otherpackage/PackageAccess.java | 29 + test/042-new-instance/src2/MaybeAbstract.java | 20 + test/043-privates/expected.txt | 6 + test/043-privates/info.txt | 6 + test/043-privates/src/Main.java | 59 + test/044-proxy/expected.txt | 104 + test/044-proxy/info.txt | 6 + test/044-proxy/native_proxy.cc | 32 + test/044-proxy/run | 18 + test/044-proxy/src/BasicTest.java | 309 + test/044-proxy/src/Clash.java | 70 + test/044-proxy/src/Clash2.java | 60 + test/044-proxy/src/Clash3.java | 75 + test/044-proxy/src/Clash4.java | 77 + test/044-proxy/src/ConstructorProxy.java | 53 + test/044-proxy/src/FloatSelect.java | 43 + test/044-proxy/src/Main.java | 61 + test/044-proxy/src/MethodComparator.java | 24 + test/044-proxy/src/NarrowingTest.java | 70 + test/044-proxy/src/NativeProxy.java | 62 + test/044-proxy/src/OOMEOnDispatch.java | 70 + 
test/044-proxy/src/ReturnsAndArgPassing.java | 467 + test/044-proxy/src/WrappedThrow.java | 244 + test/045-reflect-array/expected.txt | 12 + test/045-reflect-array/info.txt | 6 + test/045-reflect-array/src/Main.java | 269 + test/046-reflect/expected.txt | 139 + test/046-reflect/info.txt | 6 + test/046-reflect/src/Main.java | 885 ++ test/046-reflect/src/otherpackage/Other.java | 38 + test/047-returns/expected.txt | 10 + test/047-returns/info.txt | 6 + test/047-returns/src/Main.java | 79 + test/048-reflect-v8/expected.txt | 104 + test/048-reflect-v8/info.txt | 1 + test/048-reflect-v8/src/AnnotationTest.java | 291 + .../src/AnnotationTestFixture.java | 48 + .../src/AnnotationTestHelpers.java | 86 + test/048-reflect-v8/src/Calendar.java | 32 + test/048-reflect-v8/src/Calendars.java | 26 + test/048-reflect-v8/src/DefaultDeclared.java | 71 + test/048-reflect-v8/src/IFaceA.java | 24 + test/048-reflect-v8/src/IFaceSimple.java | 21 + test/048-reflect-v8/src/IsDefaultTest.java | 60 + test/048-reflect-v8/src/Main.java | 28 + test/048-reflect-v8/src/SingleUser.java | 21 + test/048-reflect-v8/src/User.java | 31 + test/048-reflect-v8/src/User2.java | 27 + test/048-reflect-v8/src/UserComplex.java | 31 + test/048-reflect-v8/src/UserSub.java | 21 + test/048-reflect-v8/src/UserSub2.java | 23 + test/049-show-object/expected.txt | 11 + test/049-show-object/info.txt | 6 + test/049-show-object/src/Main.java | 48 + test/050-sync-test/expected.txt | 34 + test/050-sync-test/info.txt | 6 + test/050-sync-test/src/Main.java | 192 + .../050-sync-test/src/ThreadDeathHandler.java | 33 + test/051-thread/expected.txt | 17 + test/051-thread/info.txt | 6 + test/051-thread/src/Main.java | 235 + test/051-thread/thread_test.cc | 38 + test/052-verifier-fun/expected.txt | 3 + test/052-verifier-fun/info.txt | 6 + test/052-verifier-fun/src/Blah.java | 4 + test/052-verifier-fun/src/BlahFeature.java | 3 + test/052-verifier-fun/src/BlahOne.java | 5 + test/052-verifier-fun/src/BlahTwo.java | 5 + 
test/052-verifier-fun/src/Main.java | 135 + test/053-wait-some/expected.txt | 7 + test/053-wait-some/info.txt | 6 + test/053-wait-some/src/Main.java | 85 + test/054-uncaught/expected.txt | 21 + test/054-uncaught/info.txt | 6 + test/054-uncaught/run | 19 + test/054-uncaught/src/Main.java | 77 + test/054-uncaught/src/ThreadDeathHandler.java | 33 + test/055-enum-performance/expected.txt | 12 + test/055-enum-performance/info.txt | 2 + test/055-enum-performance/run | 18 + test/055-enum-performance/src/Main.java | 215 + .../src/SamePackagePrivateEnum.java | 5 + .../src/SamePackagePublicEnum.java | 5 + .../otherpackage/OtherPackagePublicEnum.java | 7 + test/056-const-string-jumbo/build | 42 + test/056-const-string-jumbo/expected.txt | 1 + test/056-const-string-jumbo/info.txt | 1 + test/056-const-string-jumbo/src/Main.java | 21 + test/058-enum-order/expected.txt | 5 + test/058-enum-order/info.txt | 1 + test/058-enum-order/src/Main.java | 31 + test/059-finalizer-throw/expected.txt | 3 + test/059-finalizer-throw/info.txt | 1 + test/059-finalizer-throw/run | 19 + test/059-finalizer-throw/src/Main.java | 77 + test/061-out-of-memory/expected.txt | 8 + test/061-out-of-memory/info.txt | 1 + test/061-out-of-memory/src/Main.java | 127 + test/062-character-encodings/expected.txt | 1 + test/062-character-encodings/info.txt | 1 + test/062-character-encodings/src/Main.java | 25 + test/063-process-manager/expected.txt | 15 + test/063-process-manager/info.txt | 2 + test/063-process-manager/src/Main.java | 93 + test/064-field-access/expected.txt | 5 + test/064-field-access/info.txt | 10 + .../jasmin/SubClassUsingInaccessibleField.j | 36 + test/064-field-access/run | 18 + test/064-field-access/src/GetNonexistent.java | 21 + test/064-field-access/src/Holder.java | 19 + test/064-field-access/src/Main.java | 733 ++ .../src/OOMEOnNullAccess.java | 74 + .../src/other/ProtectedClass.java | 106 + .../src/other/PublicClass.java | 105 + test/064-field-access/src2/Holder.java | 19 + 
test/065-mismatched-implements/build | 27 + test/065-mismatched-implements/expected.txt | 1 + test/065-mismatched-implements/info.txt | 2 + test/065-mismatched-implements/src/Base.java | 21 + test/065-mismatched-implements/src/Defs.java | 21 + .../src/Indirect.java | 27 + test/065-mismatched-implements/src/Main.java | 29 + test/065-mismatched-implements/src2/Defs.java | 25 + test/066-mismatched-super/build | 17 + test/066-mismatched-super/expected.txt | 2 + test/066-mismatched-super/info.txt | 5 + test/066-mismatched-super/src/Base.java | 19 + test/066-mismatched-super/src/Defs.java | 25 + .../src/ExtendsFinal.java | 18 + test/066-mismatched-super/src/Final.java | 18 + test/066-mismatched-super/src/Main.java | 35 + test/066-mismatched-super/src2/Defs.java | 21 + test/066-mismatched-super/src2/Final.java | 18 + test/067-preemptive-unpark/expected.txt | 5 + test/067-preemptive-unpark/info.txt | 1 + test/067-preemptive-unpark/src/Main.java | 128 + test/068-classloader/expected.txt | 16 + test/068-classloader/info.txt | 8 + test/068-classloader/src-ex/AbstractGet.java | 32 + .../068-classloader/src-ex/DoubledExtend.java | 34 + .../src-ex/DoubledExtendOkay.java | 36 + .../src-ex/DoubledImplement.java | 32 + .../src-ex/DoubledImplement2.java | 32 + test/068-classloader/src-ex/GetDoubled.java | 26 + test/068-classloader/src-ex/IfaceImpl.java | 21 + test/068-classloader/src-ex/IfaceSub.java | 19 + .../068-classloader/src-ex/Inaccessible1.java | 25 + .../068-classloader/src-ex/Inaccessible2.java | 24 + .../068-classloader/src-ex/Inaccessible3.java | 24 + .../src-ex/MutationTarget.java | 22 + test/068-classloader/src-ex/Mutator.java | 25 + test/068-classloader/src/Base.java | 30 + test/068-classloader/src/BaseOkay.java | 38 + test/068-classloader/src/DoubledExtend.java | 34 + .../src/DoubledExtendOkay.java | 36 + .../068-classloader/src/DoubledImplement.java | 32 + .../src/DoubledImplement2.java | 32 + test/068-classloader/src/FancyLoader.java | 227 + 
test/068-classloader/src/ICommon.java | 22 + test/068-classloader/src/ICommon2.java | 22 + test/068-classloader/src/IGetDoubled.java | 22 + test/068-classloader/src/IfaceSuper.java | 19 + .../068-classloader/src/InaccessibleBase.java | 21 + .../src/InaccessibleInterface.java | 21 + test/068-classloader/src/Main.java | 517 + test/068-classloader/src/SimpleBase.java | 22 + test/068-classloader/src/Useless.java | 4 + test/069-field-type/expected.txt | 4 + test/069-field-type/info.txt | 4 + test/069-field-type/src/Blah.java | 9 + test/069-field-type/src/Holder.java | 7 + test/069-field-type/src/Main.java | 34 + test/069-field-type/src2/Blah.java | 10 + test/070-nio-buffer/expected.txt | 6 + test/070-nio-buffer/info.txt | 1 + test/070-nio-buffer/src/Main.java | 177 + test/071-dexfile-get-static-size/build | 28 + test/071-dexfile-get-static-size/expected.txt | 4 + test/071-dexfile-get-static-size/info.txt | 3 + .../071-dexfile-get-static-size/res/test1.dex | Bin 0 -> 1864 bytes .../071-dexfile-get-static-size/res/test2.dex | Bin 0 -> 1264 bytes .../071-dexfile-get-static-size/src/Main.java | 44 + test/071-dexfile-map-clean/build | 21 + test/071-dexfile-map-clean/expected.txt | 3 + test/071-dexfile-map-clean/info.txt | 11 + test/071-dexfile-map-clean/run | 25 + .../071-dexfile-map-clean/src-ex/Another.java | 21 + test/071-dexfile-map-clean/src/Main.java | 134 + test/071-dexfile/expected.txt | 4 + test/071-dexfile/info.txt | 4 + test/071-dexfile/src-ex/Another.java | 28 + test/071-dexfile/src/Main.java | 109 + test/072-precise-gc/expected.txt | 2 + test/072-precise-gc/info.txt | 1 + test/072-precise-gc/src/Main.java | 113 + test/072-reachability-fence/expected.txt | 5 + test/072-reachability-fence/info.txt | 4 + test/072-reachability-fence/src/Main.java | 61 + test/073-mismatched-field/expected.txt | 1 + test/073-mismatched-field/info.txt | 3 + test/073-mismatched-field/src/IMain.java | 19 + test/073-mismatched-field/src/Main.java | 31 + 
test/073-mismatched-field/src/SuperMain.java | 19 + test/073-mismatched-field/src2/IMain.java | 19 + test/074-gc-thrash/expected.txt | 2 + test/074-gc-thrash/info.txt | 1 + test/074-gc-thrash/src/Main.java | 358 + test/075-verification-error/expected.txt | 13 + test/075-verification-error/info.txt | 1 + .../src/BadIfaceImpl.java | 17 + .../src/BadInterface.java | 21 + test/075-verification-error/src/Main.java | 162 + .../src/MaybeAbstract.java | 20 + .../src/other/InaccessibleClass.java | 23 + .../src/other/InaccessibleMethod.java | 21 + .../src/other/Mutant.java | 43 + .../src2/BadInterface.java | 17 + .../src2/MaybeAbstract.java | 20 + .../src2/other/InaccessibleClass.java | 23 + .../src2/other/InaccessibleMethod.java | 21 + .../src2/other/Mutant.java | 43 + test/076-boolean-put/expected.txt | 1 + test/076-boolean-put/info.txt | 3 + test/076-boolean-put/src/Main.java | 48 + test/077-method-override/expected.txt | 15 + test/077-method-override/info.txt | 2 + test/077-method-override/src/Base.java | 83 + test/077-method-override/src/Derived.java | 59 + test/077-method-override/src/Main.java | 55 + test/077-method-override/src2/Base.java | 82 + test/078-polymorphic-virtual/expected.txt | 3 + test/078-polymorphic-virtual/info.txt | 2 + test/078-polymorphic-virtual/src/Base.java | 32 + .../078-polymorphic-virtual/src/Derived1.java | 21 + .../078-polymorphic-virtual/src/Derived2.java | 21 + .../078-polymorphic-virtual/src/Derived3.java | 21 + test/078-polymorphic-virtual/src/Main.java | 40 + test/079-phantom/expected.txt | 14 + test/079-phantom/info.txt | 1 + test/079-phantom/src/Bitmap.java | 160 + test/079-phantom/src/Main.java | 98 + test/080-oom-fragmentation/expected.txt | 0 test/080-oom-fragmentation/info.txt | 2 + test/080-oom-fragmentation/src/Main.java | 35 + .../080-oom-throw-with-finalizer/expected.txt | 0 test/080-oom-throw-with-finalizer/info.txt | 1 + .../src/Main.java | 80 + test/080-oom-throw/expected.txt | 4 + test/080-oom-throw/info.txt | 3 + 
test/080-oom-throw/run | 31 + test/080-oom-throw/src/Main.java | 171 + test/081-hot-exceptions/expected.txt | 2 + test/081-hot-exceptions/info.txt | 3 + test/081-hot-exceptions/src/Main.java | 42 + test/082-inline-execute/expected.txt | 0 test/082-inline-execute/info.txt | 1 + test/082-inline-execute/src/Main.java | 1675 +++ .../src/junit/framework/Assert.java | 296 + .../junit/framework/AssertionFailedError.java | 20 + .../junit/framework/ComparisonCompactor.java | 87 + .../junit/framework/ComparisonFailure.java | 52 + test/083-compiler-regressions/expected.txt | 44 + test/083-compiler-regressions/info.txt | 12 + test/083-compiler-regressions/src/Main.java | 9886 +++++++++++++++++ .../src/ZeroTests.java | 49 + test/084-class-init/expected.txt | 12 + test/084-class-init/info.txt | 1 + test/084-class-init/src/Exploder.java | 28 + test/084-class-init/src/IntHolder.java | 41 + test/084-class-init/src/Main.java | 161 + test/084-class-init/src/PartialInit.java | 24 + test/084-class-init/src/SlowInit.java | 40 + test/085-old-style-inner-class/expected.txt | 8 + test/085-old-style-inner-class/info.txt | 2 + .../085-old-style-inner-class/jasmin/Main$1.j | 39 + .../085-old-style-inner-class/jasmin/Main$2.j | 39 + test/085-old-style-inner-class/src/Main.java | 59 + test/086-null-super/expected.txt | 1 + test/086-null-super/info.txt | 7 + test/086-null-super/src/Main.java | 163 + test/087-gc-after-link/expected.txt | 2 + test/087-gc-after-link/info.txt | 8 + test/087-gc-after-link/src/Main.java | 179 + test/088-monitor-verification/expected.txt | 7 + test/088-monitor-verification/info.txt | 2 + .../smali/NotStructuredOverUnlock.smali | 21 + .../smali/NotStructuredUnderUnlock.smali | 21 + .../smali/NullLocks.smali | 28 + test/088-monitor-verification/smali/OK.smali | 68 + .../smali/TooDeep.smali | 82 + .../smali/UnbalancedJoin.smali | 31 + .../smali/UnbalancedStraight.smali | 18 + test/088-monitor-verification/src/Main.java | 288 + .../src/MyException.java | 24 + 
.../088-monitor-verification/src/TwoPath.java | 53 + test/089-many-methods/build | 51 + test/089-many-methods/check | 22 + test/089-many-methods/expected.txt | 1 + test/089-many-methods/info.txt | 2 + test/090-loop-formation/expected.txt | 6 + test/090-loop-formation/info.txt | 3 + test/090-loop-formation/src/Main.java | 82 + .../091-override-package-private-method/build | 36 + .../expected.txt | 1 + .../info.txt | 3 + test/091-override-package-private-method/run | 18 + .../src/Main.java | 28 + .../OverridePackagePrivateMethodSuper.java | 21 + .../src/OverridePackagePrivateMethodTest.java | 28 + test/092-locale/expected.txt | 12 + test/092-locale/info.txt | 1 + test/092-locale/src/Main.java | 159 + test/093-serialization/expected.txt | 2 + test/093-serialization/info.txt | 1 + test/093-serialization/src/Main.java | 132 + test/094-pattern/expected.txt | 3 + test/094-pattern/info.txt | 4 + test/094-pattern/src/Main.java | 95 + test/095-switch-MAX_INT/expected.txt | 1 + test/095-switch-MAX_INT/info.txt | 1 + test/095-switch-MAX_INT/src/Main.java | 11 + .../096-array-copy-concurrent-gc/expected.txt | 3 + test/096-array-copy-concurrent-gc/info.txt | 2 + .../src/Main.java | 86 + test/097-duplicate-method/classes.dex | Bin 0 -> 920 bytes test/097-duplicate-method/expected.txt | 1 + test/097-duplicate-method/info.txt | 7 + test/098-ddmc/expected.txt | 23 + test/098-ddmc/info.txt | 1 + test/098-ddmc/src/Main.java | 205 + test/099-vmdebug/check | 20 + test/099-vmdebug/expected.txt | 31 + test/099-vmdebug/info.txt | 1 + test/099-vmdebug/src/Main.java | 365 + test/100-reflect2/expected.txt | 68 + test/100-reflect2/info.txt | 1 + test/100-reflect2/src/Main.java | 380 + test/100-reflect2/src/sub/PPClass.java | 23 + .../1000-non-moving-space-stress/expected.txt | 1 + test/1000-non-moving-space-stress/info.txt | 5 + .../src-art/Main.java | 53 + .../app_image_regions.cc | 53 + test/1001-app-image-regions/build | 34 + test/1001-app-image-regions/expected.txt | 4 + 
test/1001-app-image-regions/info.txt | 1 + test/1001-app-image-regions/run | 17 + test/1001-app-image-regions/src/Main.java | 37 + test/1002-notify-startup/check | 20 + test/1002-notify-startup/expected.txt | 3 + test/1002-notify-startup/info.txt | 1 + test/1002-notify-startup/src-art/Main.java | 75 + test/1002-notify-startup/startup_interface.cc | 32 + test/1003-metadata-section-strings/build | 41 + .../expected.txt | 9 + test/1003-metadata-section-strings/info.txt | 1 + test/1003-metadata-section-strings/profile | 1 + test/1003-metadata-section-strings/run | 17 + .../src-art/Main.java | 38 + .../expected.txt | 1 + test/1004-checker-volatile-ref-load/info.txt | 6 + test/1004-checker-volatile-ref-load/run | 18 + .../src/Main.java | 95 + test/101-fibonacci/expected.txt | 2 + test/101-fibonacci/info.txt | 1 + test/101-fibonacci/src/Main.java | 58 + test/102-concurrent-gc/expected.txt | 1 + test/102-concurrent-gc/info.txt | 2 + test/102-concurrent-gc/src/Main.java | 64 + test/103-string-append/expected.txt | 1 + test/103-string-append/info.txt | 1 + test/103-string-append/src/Main.java | 31 + test/104-growth-limit/expected.txt | 1 + test/104-growth-limit/info.txt | 3 + test/104-growth-limit/src/Main.java | 65 + test/105-invoke/expected.txt | 1 + test/105-invoke/info.txt | 1 + test/105-invoke/src/Main.java | 104 + test/106-exceptions2/expected.txt | 4 + test/106-exceptions2/info.txt | 1 + test/106-exceptions2/src/Main.java | 235 + test/107-int-math2/expected.txt | 35 + test/107-int-math2/info.txt | 2 + test/107-int-math2/src/Main.java | 1180 ++ test/108-check-cast/expected.txt | 1 + test/108-check-cast/info.txt | 10 + test/108-check-cast/src/Main.java | 48 + test/109-suspend-check/expected.txt | 7 + test/109-suspend-check/info.txt | 2 + test/109-suspend-check/src/Main.java | 213 + test/110-field-access/expected.txt | 2 + test/110-field-access/info.txt | 2 + test/110-field-access/src/Main.java | 115 + test/111-unresolvable-exception/build | 38 + 
test/111-unresolvable-exception/expected.txt | 1 + test/111-unresolvable-exception/info.txt | 2 + test/111-unresolvable-exception/src/Main.java | 41 + .../src/TestException.java | 18 + test/112-double-math/expected.txt | 1 + test/112-double-math/info.txt | 1 + test/112-double-math/src/Main.java | 31 + test/113-multidex/expected.txt | 12 + test/113-multidex/info.txt | 2 + test/113-multidex/src-multidex/Main.java | 35 + test/113-multidex/src/FillerA.java | 23 + test/113-multidex/src/FillerB.java | 23 + test/113-multidex/src/Inf1.java | 28 + test/113-multidex/src/Inf2.java | 28 + test/113-multidex/src/Inf3.java | 28 + test/113-multidex/src/Inf4.java | 28 + test/113-multidex/src/Inf5.java | 28 + test/113-multidex/src/Inf6.java | 28 + test/113-multidex/src/Inf7.java | 28 + test/113-multidex/src/Inf8.java | 28 + test/113-multidex/src/Second.java | 57 + test/114-ParallelGC/expected.txt | 0 test/114-ParallelGC/info.txt | 1 + test/114-ParallelGC/src/Main.java | 142 + test/115-native-bridge/check | 20 + test/115-native-bridge/expected.txt | 82 + test/115-native-bridge/info.txt | 1 + test/115-native-bridge/nativebridge.cc | 672 ++ test/115-native-bridge/run | 39 + .../src/NativeBridgeMain.java | 215 + test/116-nodex2oat/expected.txt | 2 + test/116-nodex2oat/info.txt | 1 + test/116-nodex2oat/run | 27 + test/116-nodex2oat/src/Main.java | 24 + test/118-noimage-dex2oat/check | 20 + test/118-noimage-dex2oat/expected.txt | 14 + test/118-noimage-dex2oat/info.txt | 1 + test/118-noimage-dex2oat/run | 57 + .../smali/b_18485243.smali | 22 + test/118-noimage-dex2oat/src/Main.java | 85 + test/120-hashcode/expected.txt | 1 + test/120-hashcode/info.txt | 1 + test/120-hashcode/src/Main.java | 40 + test/121-modifiers/expected.txt | 0 test/121-modifiers/info.txt | 18 + test/121-modifiers/smali/A$B.smali | 42 + test/121-modifiers/smali/A$C.smali | 30 + test/121-modifiers/smali/A.smali | 41 + test/121-modifiers/smali/Inf.smali | 23 + test/121-modifiers/smali/NonInf.smali | 177 + 
test/121-modifiers/src-java/A.java | 23 + test/121-modifiers/src-java/Asm.java | 125 + test/121-modifiers/src-java/Inf.java | 21 + test/121-modifiers/src-java/NonInf.java | 72 + test/121-modifiers/src2/Main.java | 203 + test/121-simple-suspend-check/expected.txt | 1 + test/121-simple-suspend-check/info.txt | 1 + test/121-simple-suspend-check/src/Main.java | 35 + test/122-npe/expected.txt | 0 test/122-npe/info.txt | 1 + test/122-npe/src/Main.java | 624 ++ test/123-compiler-regressions-mt/expected.txt | 2 + test/123-compiler-regressions-mt/info.txt | 6 + .../123-compiler-regressions-mt/src/Main.java | 117 + test/123-inline-execute2/expected.txt | 299 + test/123-inline-execute2/info.txt | 1 + test/123-inline-execute2/src/Main.java | 114 + test/124-missing-classes/build | 40 + test/124-missing-classes/expected.txt | 6 + test/124-missing-classes/info.txt | 1 + test/124-missing-classes/src/Main.java | 80 + .../124-missing-classes/src/MissingClass.java | 20 + test/125-gc-and-classloading/expected.txt | 1 + test/125-gc-and-classloading/info.txt | 1 + test/125-gc-and-classloading/src/Main.java | 3072 +++++ test/126-miranda-multidex/build | 46 + test/126-miranda-multidex/expected.txt | 32 + test/126-miranda-multidex/info.txt | 2 + test/126-miranda-multidex/run | 26 + test/126-miranda-multidex/src/Main.java | 56 + .../src/MirandaAbstract.java | 36 + .../src/MirandaClass.java | 52 + .../src/MirandaClass2.java | 42 + .../src/MirandaInterface.java | 31 + .../src/MirandaInterface2.java | 26 + test/127-checker-secondarydex/build | 38 + test/127-checker-secondarydex/expected.txt | 4 + test/127-checker-secondarydex/info.txt | 3 + test/127-checker-secondarydex/run | 18 + test/127-checker-secondarydex/src/Main.java | 51 + test/127-checker-secondarydex/src/Super.java | 21 + test/127-checker-secondarydex/src/Test.java | 36 + .../expected.txt | 1 + .../info.txt | 1 + .../src/Main.java | 44 + test/129-ThreadGetId/expected.txt | 2 + test/129-ThreadGetId/info.txt | 1 + 
test/129-ThreadGetId/src/Main.java | 94 + test/130-hprof/expected.txt | 1 + test/130-hprof/info.txt | 1 + test/130-hprof/src-ex/Allocator.java | 22 + test/130-hprof/src/Main.java | 248 + test/132-daemon-locks-shutdown/expected.txt | 0 test/132-daemon-locks-shutdown/info.txt | 1 + test/132-daemon-locks-shutdown/src/Main.java | 53 + test/133-static-invoke-super/expected.txt | 3 + test/133-static-invoke-super/info.txt | 2 + test/133-static-invoke-super/run | 18 + test/133-static-invoke-super/src/Main.java | 63 + .../1336-short-finalizer-timeout/expected.txt | 6 + test/1336-short-finalizer-timeout/info.txt | 1 + test/1336-short-finalizer-timeout/run | 22 + .../src/Main.java | 95 + test/1337-gc-coverage/check | 22 + test/1337-gc-coverage/expected.txt | 0 test/1337-gc-coverage/gc_coverage.cc | 50 + test/1337-gc-coverage/info.txt | 1 + test/1337-gc-coverage/src/Main.java | 76 + test/1338-gc-no-los/expected.txt | 1 + test/1338-gc-no-los/info.txt | 1 + test/1338-gc-no-los/run | 16 + test/1338-gc-no-los/src-art/Main.java | 39 + test/1339-dead-reference-safe/check | 20 + test/1339-dead-reference-safe/expected.txt | 6 + test/1339-dead-reference-safe/info.txt | 1 + .../src/DeadReferenceSafeTest.java | 73 + .../src/DeadReferenceUnsafeTest.java | 70 + test/1339-dead-reference-safe/src/Main.java | 65 + .../src/ReachabilityFenceTest.java | 76 + .../src/ReachabilitySensitiveFunTest.java | 80 + .../src/ReachabilitySensitiveTest.java | 77 + test/134-nodex2oat-nofallback/check | 32 + test/134-nodex2oat-nofallback/expected.txt | 64 + test/134-nodex2oat-nofallback/info.txt | 2 + test/134-nodex2oat-nofallback/run | 22 + test/134-nodex2oat-nofallback/src/Main.java | 24 + test/134-reg-promotion/expected.txt | 0 test/134-reg-promotion/info.txt | 4 + test/134-reg-promotion/smali/Test.smali | 63 + test/134-reg-promotion/src/Main.java | 48 + test/135-MirandaDispatch/expected.txt | 2 + test/135-MirandaDispatch/info.txt | 6 + .../smali/b_21646347.smali | 15 + 
test/135-MirandaDispatch/src/Main.java | 60 + .../daemon_jni_shutdown.cc | 78 + test/136-daemon-jni-shutdown/expected.txt | 5 + test/136-daemon-jni-shutdown/info.txt | 1 + test/136-daemon-jni-shutdown/src/Main.java | 47 + test/137-cfi/cfi.cc | 284 + test/137-cfi/expected.txt | 18 + test/137-cfi/info.txt | 1 + test/137-cfi/run | 35 + test/137-cfi/src-multidex/Base.java | 26 + test/137-cfi/src/Main.java | 89 + test/138-duplicate-classes-check/expected.txt | 2 + test/138-duplicate-classes-check/info.txt | 1 + .../src-art/A.java | 28 + .../src-art/Main.java | 48 + .../138-duplicate-classes-check/src-ex/A.java | 23 + .../src-ex/TestEx.java | 21 + test/138-duplicate-classes-check2/build | 38 + .../138-duplicate-classes-check2/expected.txt | 2 + test/138-duplicate-classes-check2/info.txt | 2 + .../src-ex/A.java | 23 + .../src-ex/TestEx.java | 21 + test/138-duplicate-classes-check2/src/A.java | 28 + .../src/Main.java | 64 + test/139-register-natives/check | 20 + test/139-register-natives/expected.txt | 1 + test/139-register-natives/info.txt | 1 + test/139-register-natives/regnative.cc | 33 + test/139-register-natives/src/Main.java | 117 + test/140-dce-regression/expected.txt | 1 + test/140-dce-regression/info.txt | 1 + test/140-dce-regression/src/Main.java | 33 + test/140-field-packing/expected.txt | 2 + test/140-field-packing/info.txt | 1 + test/140-field-packing/src/GapOrder.java | 78 + test/140-field-packing/src/GapOrderBase.java | 24 + test/140-field-packing/src/Main.java | 23 + test/141-class-unload/expected.txt | 25 + test/141-class-unload/info.txt | 1 + test/141-class-unload/jni_unload.cc | 36 + test/141-class-unload/src-ex/IntHolder.java | 43 + test/141-class-unload/src/Main.java | 249 + test/142-classloader2/expected.txt | 5 + test/142-classloader2/info.txt | 1 + test/142-classloader2/smali/B.smali | 10 + .../smali/MyPathClassLoader.smali | 13 + test/142-classloader2/src-ex/A.java | 22 + test/142-classloader2/src/A.java | 22 + test/142-classloader2/src/Main.java | 
100 + test/143-string-value/check | 20 + test/143-string-value/expected.txt | 1 + test/143-string-value/info.txt | 2 + test/143-string-value/src/Main.java | 26 + test/144-static-field-sigquit/expected.txt | 4 + test/144-static-field-sigquit/info.txt | 8 + .../src/ClassWithStaticField.java | 21 + test/144-static-field-sigquit/src/Main.java | 32 + .../144-static-field-sigquit/src/SigQuit.java | 68 + .../src/SynchronizedUse.java | 26 + test/145-alloc-tracking-stress/expected.txt | 1 + test/145-alloc-tracking-stress/info.txt | 1 + .../src-art/Main.java | 78 + test/146-bad-interface/check | 20 + test/146-bad-interface/expected.txt | 1 + test/146-bad-interface/info.txt | 1 + test/146-bad-interface/run | 19 + test/146-bad-interface/smali/invoke_inf.smali | 24 + test/146-bad-interface/src-art/Main.java | 43 + test/146-bad-interface/src-ex/A.java | 18 + test/146-bad-interface/src-ex/Iface.java | 29 + test/147-stripped-dex-fallback/expected.txt | 1 + test/147-stripped-dex-fallback/info.txt | 2 + test/147-stripped-dex-fallback/run | 24 + test/147-stripped-dex-fallback/src/Main.java | 21 + test/148-multithread-gc-annotations/check | 22 + .../expected.txt | 0 .../gc_coverage.cc | 42 + test/148-multithread-gc-annotations/info.txt | 1 + .../src/AnnoClass1.java | 23 + .../src/AnnoClass2.java | 23 + .../src/AnnoClass3.java | 23 + .../src/AnnotationThread.java | 32 + .../src/Main.java | 32 + .../src/MovingGCThread.java | 62 + test/149-suspend-all-stress/check | 18 + test/149-suspend-all-stress/expected.txt | 1 + test/149-suspend-all-stress/info.txt | 1 + test/149-suspend-all-stress/src/Main.java | 40 + test/149-suspend-all-stress/suspend_all.cc | 65 + test/150-loadlibrary/expected.txt | 3 + test/150-loadlibrary/info.txt | 1 + test/150-loadlibrary/src/Main.java | 59 + test/151-OpenFileLimit/expected.txt | 3 + test/151-OpenFileLimit/info.txt | 2 + test/151-OpenFileLimit/run | 24 + test/151-OpenFileLimit/src/Main.java | 82 + test/152-dead-large-object/expected.txt | 0 
test/152-dead-large-object/info.txt | 1 + test/152-dead-large-object/src/Main.java | 26 + test/153-reference-stress/expected.txt | 1 + test/153-reference-stress/info.txt | 1 + test/153-reference-stress/src/Main.java | 73 + test/154-gc-loop/expected.txt | 2 + test/154-gc-loop/heap_interface.cc | 28 + test/154-gc-loop/info.txt | 1 + test/154-gc-loop/src/Main.java | 45 + test/155-java-set-resolved-type/expected.txt | 1 + test/155-java-set-resolved-type/info.txt | 2 + .../src-ex/TestInterface.java | 19 + test/155-java-set-resolved-type/src/Main.java | 94 + .../src/TestImplementation.java | 19 + .../src/TestInterface.java | 19 + .../src/TestParameter.java | 19 + .../expected.txt | 0 .../info.txt | 2 + .../src/Main.java | 88 + test/157-void-class/expected.txt | 2 + test/157-void-class/info.txt | 0 test/157-void-class/run | 22 + test/157-void-class/src-art/Main.java | 57 + test/158-app-image-class-table/expected.txt | 1 + test/158-app-image-class-table/info.txt | 3 + test/158-app-image-class-table/profile | 0 test/158-app-image-class-table/run | 17 + test/158-app-image-class-table/src/Main.java | 55 + .../src/TestImplementation.java | 21 + test/159-app-image-fields/expected.txt | 3 + test/159-app-image-fields/info.txt | 3 + test/159-app-image-fields/profile | 3 + test/159-app-image-fields/run | 21 + test/159-app-image-fields/src/AAA/Base.java | 22 + .../159-app-image-fields/src/AAA/Derived.java | 21 + test/159-app-image-fields/src/Main.java | 2156 ++++ test/160-read-barrier-stress/expected.txt | 0 test/160-read-barrier-stress/info.txt | 1 + test/160-read-barrier-stress/run | 18 + test/160-read-barrier-stress/src/Main.java | 1335 +++ .../src/ManyFieldsBase0.java | 1018 ++ .../src/ManyFieldsBase1.java | 1018 ++ .../src/ManyFieldsBase2.java | 1018 ++ .../src/ManyFieldsBase3.java | 1018 ++ test/161-final-abstract-class/expected.txt | 1 + test/161-final-abstract-class/info.txt | 1 + .../smali/AbstractFinal.smali | 16 + .../161-final-abstract-class/smali/Main.smali | 214 + 
.../smali/TestClass.smali | 22 + test/162-method-resolution/expected.txt | 46 + test/162-method-resolution/info.txt | 4 + .../jasmin-multidex/Test1User.j | 26 + .../jasmin-multidex/Test3User.j | 26 + .../162-method-resolution/jasmin/Test10Base.j | 25 + .../162-method-resolution/jasmin/Test10User.j | 36 + .../jasmin/Test1Derived.j | 43 + .../162-method-resolution/jasmin/Test1User2.j | 26 + .../jasmin/Test2Derived.j | 25 + test/162-method-resolution/jasmin/Test2User.j | 26 + .../162-method-resolution/jasmin/Test2User2.j | 23 + .../jasmin/Test3Derived.j | 25 + test/162-method-resolution/jasmin/Test4User.j | 29 + test/162-method-resolution/jasmin/Test5User.j | 40 + .../162-method-resolution/jasmin/Test5User2.j | 26 + test/162-method-resolution/jasmin/Test6User.j | 29 + .../162-method-resolution/jasmin/Test6User2.j | 29 + .../jasmin/Test8Derived.j | 33 + test/162-method-resolution/jasmin/Test8User.j | 26 + .../162-method-resolution/jasmin/Test8User2.j | 23 + .../jasmin/Test9Derived.j | 33 + test/162-method-resolution/jasmin/Test9User.j | 23 + .../162-method-resolution/jasmin/Test9User2.j | 26 + test/162-method-resolution/src/Main.java | 427 + .../src/Test10Interface.java | 18 + test/162-method-resolution/src/Test1Base.java | 21 + test/162-method-resolution/src/Test2Base.java | 21 + .../src/Test2Interface.java | 21 + test/162-method-resolution/src/Test3Base.java | 21 + .../src/Test3Interface.java | 21 + .../src/Test4Derived.java | 18 + .../src/Test4Interface.java | 19 + test/162-method-resolution/src/Test5Base.java | 18 + .../src/Test5Derived.java | 21 + .../src/Test5Interface.java | 19 + .../src/Test6Derived.java | 18 + .../src/Test6Interface.java | 19 + test/162-method-resolution/src/Test7Base.java | 21 + .../src/Test7Derived.java | 18 + .../src/Test7Interface.java | 21 + test/162-method-resolution/src/Test7User.java | 21 + .../162-method-resolution/src/Test7User2.java | 22 + test/162-method-resolution/src/Test8Base.java | 21 + 
test/162-method-resolution/src/Test9Base.java | 21 + test/163-app-image-methods/expected.txt | 3 + test/163-app-image-methods/info.txt | 3 + test/163-app-image-methods/profile | 2 + test/163-app-image-methods/run | 21 + test/163-app-image-methods/src/AAA/Base.java | 22 + .../src/AAA/Derived.java | 21 + test/163-app-image-methods/src/Main.java | 88 + .../expected.txt | 3 + .../info.txt | 3 + .../profile | 1 + test/164-resolution-trampoline-dex-cache/run | 22 + .../src-ex/MostDerived.java | 50 + .../src/Base.java | 21 + .../src/Derived.java | 18 + .../src/Main.java | 59 + test/165-lock-owner-proxy/expected.txt | 0 test/165-lock-owner-proxy/info.txt | 1 + test/165-lock-owner-proxy/run | 18 + test/165-lock-owner-proxy/src/Main.java | 118 + test/166-bad-interface-super/build | 27 + test/166-bad-interface-super/expected.txt | 2 + test/166-bad-interface-super/info.txt | 1 + .../jasmin/BadSuper1.j | 17 + .../jasmin/BadSuper2.j | 17 + .../smali/BadSuper1.smali | 17 + .../smali/BadSuper2.smali | 17 + .../src/BaseClass.java | 18 + .../src/BaseInterface.java | 18 + test/166-bad-interface-super/src/Main.java | 31 + test/167-visit-locks/expected.txt | 3 + test/167-visit-locks/info.txt | 1 + test/167-visit-locks/run | 18 + test/167-visit-locks/smali/TestSync.smali | 119 + test/167-visit-locks/src/Main.java | 29 + test/167-visit-locks/visit_locks.cc | 75 + test/168-vmstack-annotated/expected.txt | 0 test/168-vmstack-annotated/info.txt | 1 + test/168-vmstack-annotated/run | 18 + test/168-vmstack-annotated/src/Main.java | 225 + test/169-threadgroup-jni/expected.txt | 1 + test/169-threadgroup-jni/info.txt | 1 + test/169-threadgroup-jni/jni_daemon_thread.cc | 65 + test/169-threadgroup-jni/src/Main.java | 39 + test/170-interface-init/expected.txt | 1 + test/170-interface-init/info.txt | 2 + test/170-interface-init/src/Main.java | 50 + test/171-init-aste/expected.txt | 1 + test/171-init-aste/info.txt | 1 + test/171-init-aste/src-art/Main.java | 37 + test/171-init-aste/src/Main.java | 24 
+ test/172-app-image-twice/check | 18 + test/172-app-image-twice/debug_print_class.cc | 33 + test/172-app-image-twice/expected.txt | 1 + test/172-app-image-twice/info.txt | 1 + test/172-app-image-twice/profile | 1 + test/172-app-image-twice/run | 28 + test/172-app-image-twice/src/Main.java | 48 + test/172-app-image-twice/src/TestClass.java | 18 + test/173-missing-field-type/expected.txt | 1 + test/173-missing-field-type/info.txt | 1 + .../smali/BadField.smali | 34 + test/173-missing-field-type/src-art/Main.java | 34 + test/173-missing-field-type/src/Main.java | 22 + .../expected.txt | 20 + .../info.txt | 1 + .../src/Main.java | 186 + test/175-alloc-big-bignums/expected.txt | 1 + test/175-alloc-big-bignums/info.txt | 11 + test/175-alloc-big-bignums/src/Main.java | 38 + test/176-app-image-string/expected.txt | 1 + test/176-app-image-string/info.txt | 1 + test/176-app-image-string/profile | 1 + test/176-app-image-string/run | 17 + test/176-app-image-string/src/Main.java | 27 + .../expected.txt | 1 + .../177-visibly-initialized-deadlock/info.txt | 2 + .../src/Main.java | 55 + .../visibly_initialized.cc | 38 + test/178-app-image-native-method/check | 18 + test/178-app-image-native-method/expected.txt | 14 + test/178-app-image-native-method/info.txt | 1 + .../native_methods.cc | 130 + test/178-app-image-native-method/profile | 8 + test/178-app-image-native-method/run | 25 + .../178-app-image-native-method/src/Main.java | 283 + test/180-native-default-method/build | 30 + test/180-native-default-method/expected.txt | 1 + test/180-native-default-method/info.txt | 3 + .../jasmin/TestClass.j | 25 + .../jasmin/TestInterface.j | 19 + test/180-native-default-method/src/Main.java | 32 + test/1900-track-alloc/alloc.cc | 159 + test/1900-track-alloc/expected.txt | 0 test/1900-track-alloc/info.txt | 1 + test/1900-track-alloc/run | 17 + test/1900-track-alloc/src/Main.java | 21 + test/1900-track-alloc/src/art/Main.java | 1 + test/1900-track-alloc/src/art/Test1900.java | 153 + 
test/1901-get-bytecodes/bytecodes.cc | 64 + test/1901-get-bytecodes/expected.txt | 0 test/1901-get-bytecodes/info.txt | 3 + test/1901-get-bytecodes/run | 17 + test/1901-get-bytecodes/src/Main.java | 21 + test/1901-get-bytecodes/src/art/Test1901.java | 147 + test/1902-suspend/expected.txt | 0 test/1902-suspend/info.txt | 2 + test/1902-suspend/run | 17 + test/1902-suspend/src/Main.java | 21 + test/1902-suspend/src/art/Suspension.java | 1 + test/1902-suspend/src/art/Test1902.java | 118 + test/1903-suspend-self/expected.txt | 0 test/1903-suspend-self/info.txt | 1 + test/1903-suspend-self/run | 17 + test/1903-suspend-self/src/Main.java | 21 + .../1903-suspend-self/src/art/Suspension.java | 1 + test/1903-suspend-self/src/art/Test1903.java | 91 + test/1904-double-suspend/expected.txt | 1 + test/1904-double-suspend/info.txt | 1 + test/1904-double-suspend/run | 17 + test/1904-double-suspend/src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../1904-double-suspend/src/art/Test1904.java | 109 + test/1905-suspend-native/expected.txt | 8 + test/1905-suspend-native/info.txt | 1 + test/1905-suspend-native/native_suspend.cc | 51 + test/1905-suspend-native/run | 17 + test/1905-suspend-native/src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../1905-suspend-native/src/art/Test1905.java | 60 + test/1906-suspend-list-me-first/expected.txt | 1 + test/1906-suspend-list-me-first/info.txt | 1 + test/1906-suspend-list-me-first/run | 17 + test/1906-suspend-list-me-first/src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../src/art/Test1906.java | 89 + .../1907-suspend-list-self-twice/expected.txt | 2 + test/1907-suspend-list-self-twice/info.txt | 1 + test/1907-suspend-list-self-twice/run | 17 + .../src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../src/art/Test1907.java | 82 + .../expected.txt | 10 + test/1908-suspend-native-resume-self/info.txt | 1 + .../native_suspend_resume.cc | 67 + test/1908-suspend-native-resume-self/run | 17 + .../src/Main.java | 21 + 
.../src/art/Suspension.java | 1 + .../src/art/Test1908.java | 71 + test/1909-per-agent-tls/agent_tls.cc | 75 + test/1909-per-agent-tls/expected.txt | 1 + test/1909-per-agent-tls/info.txt | 1 + test/1909-per-agent-tls/run | 17 + test/1909-per-agent-tls/src/Main.java | 21 + test/1909-per-agent-tls/src/art/Main.java | 1 + test/1909-per-agent-tls/src/art/Test1909.java | 176 + test/1910-transform-with-default/expected.txt | 4 + test/1910-transform-with-default/info.txt | 4 + test/1910-transform-with-default/run | 17 + .../1910-transform-with-default/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1910.java | 84 + test/1911-get-local-var-table/expected.txt | 0 test/1911-get-local-var-table/info.txt | 1 + test/1911-get-local-var-table/run | 18 + test/1911-get-local-var-table/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1911.java | 218 + .../1912-get-set-local-primitive/expected.txt | 210 + test/1912-get-set-local-primitive/info.txt | 2 + test/1912-get-set-local-primitive/run | 18 + .../src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1912.java | 263 + test/1913-get-set-local-objects/expected.txt | 126 + test/1913-get-set-local-objects/info.txt | 2 + test/1913-get-set-local-objects/run | 18 + test/1913-get-set-local-objects/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1913.java | 280 + test/1914-get-local-instance/expected.txt | 15 + test/1914-get-local-instance/info.txt | 2 + .../1914-get-local-instance/local_instance.cc | 68 + test/1914-get-local-instance/run | 18 + test/1914-get-local-instance/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + 
.../src/art/Suspension.java | 1 + .../src/art/Test1914.java | 208 + .../expected.txt | 5 + .../info.txt | 2 + test/1915-get-set-local-current-thread/run | 18 + .../src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1915.java | 105 + test/1916-get-set-current-frame/expected.txt | 4 + test/1916-get-set-current-frame/info.txt | 2 + test/1916-get-set-current-frame/run | 18 + test/1916-get-set-current-frame/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1916.java | 148 + test/1917-get-stack-frame/expected.txt | 33 + test/1917-get-stack-frame/info.txt | 1 + test/1917-get-stack-frame/run | 18 + test/1917-get-stack-frame/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1917.java | 157 + .../expected.txt | 4 + test/1919-vminit-thread-start-timing/info.txt | 3 + test/1919-vminit-thread-start-timing/run | 17 + .../src/Main.java | 21 + .../src/art/Main.java | 1 + .../src/art/Test1919.java | 65 + .../1919-vminit-thread-start-timing/vminit.cc | 201 + test/1919-vminit-thread-start-timing/vminit.h | 30 + test/1920-suspend-native-monitor/expected.txt | 8 + test/1920-suspend-native-monitor/info.txt | 1 + .../native_suspend_monitor.cc | 81 + test/1920-suspend-native-monitor/run | 17 + .../1920-suspend-native-monitor/src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../src/art/Test1920.java | 76 + .../expected.txt | 8 + .../info.txt | 2 + .../native_suspend_recursive_monitor.cc | 89 + .../1921-suspend-native-recursive-monitor/run | 17 + .../src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../src/art/Test1921.java | 76 + test/1922-owned-monitors-info/expected.txt | 678 ++ test/1922-owned-monitors-info/info.txt | 3 + .../owned_monitors.cc | 144 + 
test/1922-owned-monitors-info/run | 17 + test/1922-owned-monitors-info/src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../src/art/Test1922.java | 357 + test/1923-frame-pop/expected.txt | 8 + test/1923-frame-pop/info.txt | 3 + test/1923-frame-pop/run | 18 + test/1923-frame-pop/src/Main.java | 21 + test/1923-frame-pop/src/art/Breakpoint.java | 1 + test/1923-frame-pop/src/art/FramePop.java | 1 + test/1923-frame-pop/src/art/Locals.java | 1 + test/1923-frame-pop/src/art/StackTrace.java | 1 + test/1923-frame-pop/src/art/Suspension.java | 1 + test/1923-frame-pop/src/art/Test1923.java | 214 + test/1923-frame-pop/src/art/Trace.java | 1 + test/1924-frame-pop-toggle/expected.txt | 8 + .../1924-frame-pop-toggle/frame_pop_toggle.cc | 51 + test/1924-frame-pop-toggle/info.txt | 3 + test/1924-frame-pop-toggle/run | 18 + test/1924-frame-pop-toggle/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/FramePop.java | 1 + .../1924-frame-pop-toggle/src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1924.java | 216 + test/1924-frame-pop-toggle/src/art/Trace.java | 1 + test/1925-self-frame-pop/expected.txt | 4 + test/1925-self-frame-pop/info.txt | 3 + test/1925-self-frame-pop/run | 18 + test/1925-self-frame-pop/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../1925-self-frame-pop/src/art/FramePop.java | 1 + test/1925-self-frame-pop/src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../1925-self-frame-pop/src/art/Test1925.java | 168 + test/1925-self-frame-pop/src/art/Trace.java | 1 + test/1926-missed-frame-pop/expected.txt | 9 + .../1926-missed-frame-pop/frame_pop_missed.cc | 55 + test/1926-missed-frame-pop/info.txt | 3 + test/1926-missed-frame-pop/run | 18 + test/1926-missed-frame-pop/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/FramePop.java | 1 + .../1926-missed-frame-pop/src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + 
.../src/art/Suspension.java | 1 + .../src/art/Test1926.java | 227 + test/1926-missed-frame-pop/src/art/Trace.java | 1 + test/1927-exception-event/exception_event.cc | 102 + test/1927-exception-event/expected.txt | 278 + test/1927-exception-event/info.txt | 3 + test/1927-exception-event/run | 18 + test/1927-exception-event/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Exceptions.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1927.java | 277 + .../expected.txt | 120 + test/1928-exception-event-exception/info.txt | 5 + test/1928-exception-event-exception/run | 18 + .../src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Exceptions.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1928.java | 216 + .../expected.txt | 302 + test/1929-exception-catch-exception/info.txt | 1 + test/1929-exception-catch-exception/run | 18 + .../smali/art/Test1929$Impl.smali | 363 + .../src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Exceptions.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1929.java | 315 + test/1930-monitor-info/expected.txt | 31 + test/1930-monitor-info/info.txt | 3 + test/1930-monitor-info/monitor.cc | 67 + test/1930-monitor-info/run | 17 + test/1930-monitor-info/src/Main.java | 21 + test/1930-monitor-info/src/art/Monitors.java | 1 + .../1930-monitor-info/src/art/Suspension.java | 1 + test/1930-monitor-info/src/art/Test1930.java | 155 + test/1931-monitor-events/check | 22 + test/1931-monitor-events/expected.txt | 32 + test/1931-monitor-events/info.txt | 3 + test/1931-monitor-events/jvm-expected.patch | 3 + test/1931-monitor-events/run | 17 + test/1931-monitor-events/src/Main.java | 21 + .../1931-monitor-events/src/art/Monitors.java | 1 + .../src/art/Suspension.java | 1 + .../1931-monitor-events/src/art/Test1931.java | 210 + test/1932-monitor-events-misc/check | 22 + 
test/1932-monitor-events-misc/expected.txt | 104 + test/1932-monitor-events-misc/info.txt | 4 + .../jvm-expected.patch | 2 + test/1932-monitor-events-misc/monitor_misc.cc | 59 + test/1932-monitor-events-misc/run | 17 + test/1932-monitor-events-misc/src/Main.java | 21 + .../src/art/Monitors.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1932.java | 623 ++ .../expected.txt | 6 + test/1933-monitor-current-contended/info.txt | 4 + test/1933-monitor-current-contended/run | 17 + .../src/Main.java | 21 + .../src/art/Monitors.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1933.java | 67 + test/1934-jvmti-signal-thread/expected.txt | 27 + test/1934-jvmti-signal-thread/info.txt | 3 + test/1934-jvmti-signal-thread/run | 17 + .../signal_threads.cc | 157 + test/1934-jvmti-signal-thread/src/Main.java | 21 + .../src/art/Monitors.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1934.java | 291 + .../src/art/Threads.java | 1 + .../expected.txt | 5 + test/1935-get-set-current-frame-jit/info.txt | 2 + test/1935-get-set-current-frame-jit/run | 18 + .../src/Main.java | 189 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + test/1936-thread-end-events/check | 22 + test/1936-thread-end-events/expected.txt | 42 + test/1936-thread-end-events/info.txt | 6 + .../1936-thread-end-events/jvm-expected.patch | 16 + test/1936-thread-end-events/method_trace.cc | 75 + test/1936-thread-end-events/run | 18 + test/1936-thread-end-events/src/Main.java | 21 + .../src/art/Test1936.java | 68 + .../1936-thread-end-events/src/art/Trace.java | 1 + test/1937-transform-soft-fail/expected.txt | 3 + test/1937-transform-soft-fail/info.txt | 1 + test/1937-transform-soft-fail/run | 17 + test/1937-transform-soft-fail/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1937.java | 94 + .../expected.txt | 4 + .../info.txt | 2 + test/1938-transform-abstract-single-impl/run | 17 
+ .../src/Main.java | 100 + .../src/art/Redefinition.java | 1 + test/1939-proxy-frames/expected.txt | 8 + test/1939-proxy-frames/info.txt | 2 + test/1939-proxy-frames/local_instance.cc | 66 + test/1939-proxy-frames/run | 18 + test/1939-proxy-frames/src/Main.java | 21 + .../1939-proxy-frames/src/art/Breakpoint.java | 1 + test/1939-proxy-frames/src/art/Locals.java | 1 + .../1939-proxy-frames/src/art/StackTrace.java | 1 + .../1939-proxy-frames/src/art/Suspension.java | 1 + test/1939-proxy-frames/src/art/Test1939.java | 185 + test/1940-ddms-ext/ddm_ext.cc | 207 + test/1940-ddms-ext/expected.txt | 25 + test/1940-ddms-ext/info.txt | 1 + test/1940-ddms-ext/run | 17 + test/1940-ddms-ext/src-art/art/Test1940.java | 235 + test/1940-ddms-ext/src/Main.java | 21 + test/1940-ddms-ext/src/art/Test1940.java | 23 + test/1941-dispose-stress/dispose_stress.cc | 70 + test/1941-dispose-stress/expected.txt | 1 + test/1941-dispose-stress/info.txt | 3 + test/1941-dispose-stress/run | 18 + test/1941-dispose-stress/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../1941-dispose-stress/src/art/Test1941.java | 93 + test/1941-dispose-stress/src/art/Trace.java | 1 + .../expected.txt | 11 + test/1942-suspend-raw-monitor-exit/info.txt | 3 + .../native_suspend_monitor.cc | 80 + test/1942-suspend-raw-monitor-exit/run | 17 + .../src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../src/art/Test1942.java | 73 + .../expected.txt | 7 + test/1943-suspend-raw-monitor-wait/info.txt | 4 + .../native_suspend_monitor.cc | 82 + test/1943-suspend-raw-monitor-wait/run | 17 + .../src/Main.java | 21 + .../src/art/Suspension.java | 1 + .../src/art/Test1943.java | 66 + test/1945-proxy-method-arguments/expected.txt | 26 + test/1945-proxy-method-arguments/get_args.cc | 113 + test/1945-proxy-method-arguments/info.txt | 7 + .../1945-proxy-method-arguments/src/Main.java | 149 + test/1946-list-descriptors/descriptors.cc | 140 + test/1946-list-descriptors/expected.txt | 1 + test/1946-list-descriptors/info.txt 
| 1 + test/1946-list-descriptors/run | 17 + .../src-art/art/Test1946.java | 127 + test/1946-list-descriptors/src/Main.java | 21 + .../src/art/Test1946.java | 23 + .../check_deopt.cc | 35 + .../expected.txt | 8 + test/1947-breakpoint-redefine-deopt/info.txt | 5 + test/1947-breakpoint-redefine-deopt/run | 18 + .../src/Main.java | 104 + .../src/art/Breakpoint.java | 1 + .../src/art/Redefinition.java | 1 + test/1948-obsolete-const-method-handle/build | 23 + .../expected.txt | 6 + .../info.txt | 6 + test/1948-obsolete-const-method-handle/run | 18 + .../util-src/build-classes | 45 + .../util-src/info.txt | 7 + .../util-src/src/Main.java | 21 + .../util-src/src/art/Redefinition.java | 91 + .../util-src/src/art/Test1948.java | 62 + .../art/constmethodhandle/BaseTestInvoke.java | 24 + .../src/art/constmethodhandle/Responses.java | 29 + .../art/constmethodhandle/TestGenerator.java | 185 + .../src/art/constmethodhandle/TestInvoke.java | 33 + test/1949-short-dex-file/expected.txt | 1 + test/1949-short-dex-file/info.txt | 30 + test/1949-short-dex-file/run | 17 + test/1949-short-dex-file/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../1949-short-dex-file/src/art/Test1949.java | 142 + test/1950-unprepared-transform/check | 22 + test/1950-unprepared-transform/expected.txt | 7 + test/1950-unprepared-transform/info.txt | 1 + .../jvm-expected.patch | 6 + test/1950-unprepared-transform/run | 17 + .../src-ex/Transform.java | 22 + test/1950-unprepared-transform/src/Main.java | 153 + .../src/art/Redefinition.java | 1 + .../unprepared_transform.cc | 77 + .../expected.txt | 1 + test/1951-monitor-enter-no-suspend/info.txt | 1 + .../raw_monitor.cc | 104 + test/1951-monitor-enter-no-suspend/run | 17 + .../src/Main.java | 21 + .../src/art/Main.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1951.java | 65 + test/1953-pop-frame/check | 21 + .../class-loading-expected.patch | 21 + test/1953-pop-frame/expected.txt | 101 + test/1953-pop-frame/info.txt | 7 + 
test/1953-pop-frame/pop_frame.cc | 103 + test/1953-pop-frame/run | 24 + test/1953-pop-frame/src/Main.java | 23 + test/1953-pop-frame/src/art/Breakpoint.java | 1 + test/1953-pop-frame/src/art/Redefinition.java | 1 + test/1953-pop-frame/src/art/StackTrace.java | 1 + .../1953-pop-frame/src/art/SuspendEvents.java | 1 + test/1953-pop-frame/src/art/Suspension.java | 1 + test/1953-pop-frame/src/art/Test1953.java | 978 ++ test/1954-pop-frame-jit/check | 21 + test/1954-pop-frame-jit/expected.txt | 121 + test/1954-pop-frame-jit/info.txt | 7 + test/1954-pop-frame-jit/jvm-expected.patch | 21 + test/1954-pop-frame-jit/run | 24 + test/1954-pop-frame-jit/src/Main.java | 60 + .../src/art/Breakpoint.java | 1 + .../src/art/Redefinition.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/SuspendEvents.java | 1 + .../src/art/Suspension.java | 1 + test/1954-pop-frame-jit/src/art/Test1953.java | 1 + test/1955-pop-frame-jit-called/check | 21 + test/1955-pop-frame-jit-called/expected.txt | 121 + test/1955-pop-frame-jit-called/info.txt | 7 + .../jvm-expected.patch | 21 + test/1955-pop-frame-jit-called/run | 26 + test/1955-pop-frame-jit-called/src/Main.java | 53 + .../src/art/Breakpoint.java | 1 + .../src/art/Redefinition.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/SuspendEvents.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1953.java | 1 + test/1956-pop-frame-jit-calling/check | 21 + test/1956-pop-frame-jit-calling/expected.txt | 121 + test/1956-pop-frame-jit-calling/info.txt | 7 + .../jvm-expected.patch | 21 + test/1956-pop-frame-jit-calling/run | 26 + test/1956-pop-frame-jit-calling/src/Main.java | 53 + .../src/art/Breakpoint.java | 1 + .../src/art/Redefinition.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/SuspendEvents.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1953.java | 1 + test/1957-error-ext/expected.txt | 4 + test/1957-error-ext/info.txt | 1 + test/1957-error-ext/lasterror.cc | 112 + test/1957-error-ext/run | 18 + 
test/1957-error-ext/src/Main.java | 21 + test/1957-error-ext/src/art/Redefinition.java | 1 + test/1957-error-ext/src/art/Test1957.java | 86 + test/1958-transform-try-jit/expected.txt | 2 + test/1958-transform-try-jit/info.txt | 5 + test/1958-transform-try-jit/run | 17 + test/1958-transform-try-jit/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1958.java | 106 + .../expected.txt | 0 .../fake_redef_object.cc | 134 + test/1959-redefine-object-instrument/info.txt | 9 + test/1959-redefine-object-instrument/run | 17 + .../src/Main.java | 76 + .../src/art/Breakpoint.java | 1 + test/1960-checker-bounds-codegen/expected.txt | 1 + test/1960-checker-bounds-codegen/info.txt | 1 + .../1960-checker-bounds-codegen/src/Main.java | 108 + .../expected.txt | 100 + .../info.txt | 2 + .../native_say_hi.cc | 36 + test/1960-obsolete-jit-multithread-native/run | 17 + .../src/Main.java | 203 + .../src/Transform.java | 32 + .../src/art/Redefinition.java | 1 + .../1961-checker-loop-vectorizer/expected.txt | 3 + test/1961-checker-loop-vectorizer/info.txt | 1 + .../src/Main.java | 83 + .../expected.txt | 100 + test/1961-obsolete-jit-multithread/info.txt | 2 + test/1961-obsolete-jit-multithread/run | 17 + .../src/Main.java | 201 + .../src/Transform.java | 25 + .../src/art/Redefinition.java | 1 + test/1962-multi-thread-events/expected.txt | 4 + test/1962-multi-thread-events/info.txt | 5 + .../multi_thread_events.cc | 91 + test/1962-multi-thread-events/run | 17 + test/1962-multi-thread-events/src/Main.java | 21 + .../src/art/Test1962.java | 83 + .../add_to_loader.cc | 90 + .../check | 26 + .../check_memfd_create.cc | 67 + .../expected.txt | 19 + .../info.txt | 1 + .../1963-add-to-dex-classloader-in-memory/run | 17 + .../src/Main.java | 32 + .../src/art/Redefinition.java | 1 + .../src/art/Test1963.java | 240 + .../add_to_loader.cc | 90 + .../expected.txt | 23 + .../1964-add-to-dex-classloader-file/info.txt | 1 + test/1964-add-to-dex-classloader-file/run | 17 + 
.../src-ex/foobar/NewClass.java | 44 + .../src/Main.java | 210 + .../src/art/Breakpoint.java | 1 + .../src/art/Redefinition.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../build | 25 + .../expected.txt | 210 + .../info.txt | 2 + .../jasmin/TestCases1965.j | 173 + .../run | 21 + .../smali/TestCases1965.smali | 140 + .../src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1965.java | 213 + .../1966-get-set-local-objects-no-table/build | 25 + .../expected.txt | 162 + .../info.txt | 2 + .../jasmin/TestCases1966.j | 161 + test/1966-get-set-local-objects-no-table/run | 21 + .../smali/TestCases1966.smali | 121 + .../src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1966.java | 237 + test/1967-get-set-local-bad-slot/expected.txt | 108 + test/1967-get-set-local-bad-slot/info.txt | 2 + test/1967-get-set-local-bad-slot/run | 18 + .../1967-get-set-local-bad-slot/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Locals.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1967.java | 230 + test/1968-force-early-return/expected.txt | 195 + .../force_early_return.cc | 82 + test/1968-force-early-return/info.txt | 4 + test/1968-force-early-return/run | 24 + test/1968-force-early-return/src/Main.java | 22 + .../src/art/Breakpoint.java | 1 + .../src/art/NonStandardExit.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/SuspendEvents.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1968.java | 903 ++ test/1969-force-early-return-void/check | 21 + .../class-loading-expected.patch | 35 + .../1969-force-early-return-void/expected.txt | 178 + .../force_early_return_void.cc | 102 + test/1969-force-early-return-void/info.txt | 4 + 
test/1969-force-early-return-void/run | 17 + .../src/Main.java | 22 + .../src/art/Breakpoint.java | 1 + .../src/art/NonStandardExit.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/SuspendEvents.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1969.java | 973 ++ .../1970-force-early-return-long/expected.txt | 222 + .../force_early_return_long.cc | 81 + test/1970-force-early-return-long/info.txt | 4 + test/1970-force-early-return-long/run | 24 + .../src/Main.java | 22 + .../src/art/Breakpoint.java | 1 + .../src/art/NonStandardExit.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/SuspendEvents.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1970.java | 887 ++ .../expected.txt | 3 + test/1971-multi-force-early-return/info.txt | 4 + test/1971-multi-force-early-return/run | 24 + .../src/Main.java | 22 + .../src/art/Breakpoint.java | 1 + .../src/art/NonStandardExit.java | 1 + .../src/art/StackTrace.java | 1 + .../src/art/SuspendEvents.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test1971.java | 181 + test/1972-jni-id-swap-indices/expected.txt | 7 + test/1972-jni-id-swap-indices/info.txt | 3 + test/1972-jni-id-swap-indices/jni_id.cc | 60 + test/1972-jni-id-swap-indices/run | 19 + test/1972-jni-id-swap-indices/src/Main.java | 78 + test/1973-jni-id-swap-pointer/expected.txt | 6 + test/1973-jni-id-swap-pointer/info.txt | 1 + test/1973-jni-id-swap-pointer/run | 19 + test/1973-jni-id-swap-pointer/src/Main.java | 69 + test/1974-resize-array/expected.txt | 84 + test/1974-resize-array/info.txt | 3 + test/1974-resize-array/resize_array.cc | 268 + test/1974-resize-array/run | 18 + test/1974-resize-array/src/Main.java | 21 + test/1974-resize-array/src/art/Main.java | 1 + test/1974-resize-array/src/art/Test1974.java | 534 + .../expected.txt | 98 + .../info.txt | 1 + test/1975-hello-structural-transformation/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1975.java | 264 + 
.../src/art/Transform1975.java | 93 + .../structural_transform.cc | 77 + .../expected.txt | 70 + .../info.txt | 1 + test/1976-hello-structural-static-methods/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1976.java | 203 + .../src/art/Transform1976.java | 74 + .../structural_transform_methods.cc | 77 + .../expected.txt | 9 + .../info.txt | 1 + test/1977-hello-structural-obsolescence/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1977.java | 119 + .../expected.txt | 21 + .../info.txt | 1 + .../run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1978.java | 213 + .../expected.txt | 16 + .../info.txt | 2 + .../run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1979.java | 172 + .../1980-obsolete-object-cleared/expected.txt | 440 + test/1980-obsolete-object-cleared/info.txt | 2 + test/1980-obsolete-object-cleared/run | 17 + .../src/Main.java | 304 + .../src/art/Redefinition.java | 1 + .../build | 20 + .../expected.txt | 37 + .../expected_no_mh.txt | 21 + .../info.txt | 2 + .../run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1981.java | 347 + .../src/art/Test1981_Varhandles.java | 49 + .../expected.txt | 30 + .../info.txt | 2 + .../run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1982.java | 200 + .../build | 20 + .../expected-cts.txt | 30 + .../expected.txt | 37 + .../info.txt | 1 + .../1983-structural-redefinition-failures/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1983.java | 120 + .../expected.txt | 31 + .../info.txt | 1 + test/1984-structural-redefine-field-trace/run | 18 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1984.java | 134 + .../src/art/Trace.java | 1 + .../expected.txt | 1 + .../info.txt | 1 + test/1985-structural-redefine-stack-scope/run | 18 + 
.../src/Main.java | 81 + .../src/art/Redefinition.java | 1 + .../stack_scope.cc | 83 + .../expected.txt | 1 + .../info.txt | 1 + .../run | 18 + .../src/Main.java | 127 + .../src/art/Redefinition.java | 1 + .../expected.txt | 12 + .../info.txt | 1 + .../run | 18 + .../src/Main.java | 90 + .../src/art/Redefinition.java | 1 + .../expected.txt | 5 + test/1988-multi-structural-redefine/info.txt | 1 + test/1988-multi-structural-redefine/run | 18 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1988.java | 126 + test/1989-transform-bad-monitor/expected.txt | 6 + test/1989-transform-bad-monitor/info.txt | 6 + test/1989-transform-bad-monitor/run | 17 + test/1989-transform-bad-monitor/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1989.java | 97 + test/1990-structural-bad-verify/expected.txt | 2 + test/1990-structural-bad-verify/info.txt | 6 + test/1990-structural-bad-verify/run | 17 + test/1990-structural-bad-verify/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1990.java | 123 + .../expected.txt | 2 + .../info.txt | 1 + test/1991-hello-structural-retransform/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1991.java | 79 + .../expected.txt | 2 + test/1992-retransform-no-such-field/info.txt | 1 + test/1992-retransform-no-such-field/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1992.java | 89 + .../1993-fallback-non-structural/expected.txt | 3 + test/1993-fallback-non-structural/info.txt | 4 + test/1993-fallback-non-structural/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1993.java | 86 + .../expected.txt | 5 + test/1994-final-virtual-structural/info.txt | 3 + test/1994-final-virtual-structural/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1994.java | 88 + .../expected.txt | 0 .../info.txt | 4 + .../run | 21 + .../src/Main.java | 21 
+ .../src/art/Redefinition.java | 1 + .../src/art/Test1995.java | 170 + .../expected.txt | 6 + .../info.txt | 3 + .../run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1996.java | 94 + .../expected.txt | 6 + test/1997-structural-shadow-method/info.txt | 1 + test/1997-structural-shadow-method/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1997.java | 84 + .../1998-structural-shadow-field/expected.txt | 4 + test/1998-structural-shadow-field/info.txt | 1 + test/1998-structural-shadow-field/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1998.java | 65 + test/1999-virtual-structural/expected.txt | 4 + test/1999-virtual-structural/info.txt | 3 + test/1999-virtual-structural/run | 17 + test/1999-virtual-structural/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test1999.java | 85 + .../AbstractCollection.patch | 16 + test/2000-virtual-list-structural/build | 31 + .../2000-virtual-list-structural/expected.txt | 5 + test/2000-virtual-list-structural/info.txt | 3 + test/2000-virtual-list-structural/run | 17 + .../src-ex/java/util/AbstractCollection.java | 1 + .../src/Main.java | 100 + .../src/art/Redefinition.java | 1 + .../expected.txt | 0 .../info.txt | 4 + test/2001-virtual-structural-multithread/run | 17 + .../src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test2001.java | 233 + .../src/Main.java | 21 + .../expected.txt | 1 + .../info.txt | 4 + test/2002-virtual-structural-initializing/run | 17 + .../src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test2002.java | 161 + .../src/Main.java | 21 + .../expected.txt | 6 + test/2003-double-virtual-structural/info.txt | 4 + test/2003-double-virtual-structural/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test2003.java | 127 + .../expected.txt | 2 + .../info.txt | 4 + .../run | 17 + 
.../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test2004.java | 114 + .../expected.txt | 0 .../info.txt | 5 + .../pause-all.cc | 88 + .../2005-pause-all-redefine-multithreaded/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Suspension.java | 1 + .../src/art/Test2005.java | 210 + .../expected.txt | 3 + .../info.txt | 4 + test/2006-virtual-structural-finalizing/run | 17 + .../src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test2006.java | 115 + .../src/Main.java | 21 + .../expected.txt | 2 + .../info.txt | 4 + test/2007-virtual-structural-finalizable/run | 17 + .../src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test2007.java | 121 + .../src/Main.java | 21 + .../expected.txt | 2 + .../info.txt | 4 + test/2008-redefine-then-old-reflect-field/run | 17 + .../src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test2008.java | 82 + test/2009-structural-local-ref/expected.txt | 8 + test/2009-structural-local-ref/info.txt | 3 + test/2009-structural-local-ref/local-ref.cc | 88 + test/2009-structural-local-ref/run | 17 + .../src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test2009.java | 147 + test/2009-structural-local-ref/src/Main.java | 21 + .../expected.txt | 0 .../info.txt | 1 + .../src/Main.java | 473 + .../expected.txt | 2 + .../info.txt | 3 + .../src/Main.java | 66 + .../stack_walk_concurrent.cc | 97 + .../expected.txt | 8 + .../info.txt | 3 + .../run | 17 + .../set-jni-id-used.cc | 49 + .../src-art/Main.java | 62 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test1983.java | 120 + .../expected.txt | 0 test/2019-constantcalculationsinking/info.txt | 1 + .../src/Main.java | 229 + test/202-thread-oome/expected.txt | 0 test/202-thread-oome/info.txt | 1 + test/202-thread-oome/src/Main.java | 29 + test/2020-InvokeVirtual-Inlining/expected.txt | 0 test/2020-InvokeVirtual-Inlining/info.txt 
| 1 + .../2020-InvokeVirtual-Inlining/src/Main.java | 84 + .../2020-InvokeVirtual-Inlining/src/Test.java | 40 + test/2021-InvokeStatic-Inlining/expected.txt | 0 test/2021-InvokeStatic-Inlining/info.txt | 1 + test/2021-InvokeStatic-Inlining/src/Main.java | 85 + test/2022-Invariantloops/expected.txt | 0 test/2022-Invariantloops/info.txt | 2 + test/2022-Invariantloops/src/Main.java | 48 + .../2023-InvariantLoops_typecast/expected.txt | 0 test/2023-InvariantLoops_typecast/info.txt | 2 + .../src/Main.java | 79 + test/2024-InvariantNegativeLoop/expected.txt | 0 test/2024-InvariantNegativeLoop/info.txt | 1 + test/2024-InvariantNegativeLoop/src/Main.java | 51 + test/2025-ChangedArrayValue/expected.txt | 0 test/2025-ChangedArrayValue/info.txt | 1 + test/2025-ChangedArrayValue/src/Main.java | 55 + .../expected.txt | 0 test/2026-DifferentMemoryLSCouples/info.txt | 1 + .../src/Main.java | 51 + .../expected.txt | 0 test/2027-TwiceTheSameMemoryCouple/info.txt | 1 + .../src/Main.java | 41 + test/2028-MultiBackward/expected.txt | 0 test/2028-MultiBackward/info.txt | 1 + test/2028-MultiBackward/src/Main.java | 51 + test/2029-contended-monitors/expected.txt | 10 + test/2029-contended-monitors/info.txt | 4 + test/2029-contended-monitors/src/Main.java | 195 + test/2029-spaces-in-SimpleName/build | 40 + test/2029-spaces-in-SimpleName/classes.dex | Bin 0 -> 808 bytes test/2029-spaces-in-SimpleName/expected.txt | 1 + test/2029-spaces-in-SimpleName/info.txt | 5 + .../src/SpacesInSimpleName.java | 73 + test/203-multi-checkpoint/expected.txt | 5 + test/203-multi-checkpoint/info.txt | 4 + test/203-multi-checkpoint/multi_checkpoint.cc | 90 + test/203-multi-checkpoint/src/Main.java | 59 + test/2030-long-running-child/expected.txt | 3 + test/2030-long-running-child/info.txt | 3 + test/2030-long-running-child/src/Main.java | 60 + .../expected.txt | 3 + .../2031-zygote-compiled-frame-deopt/info.txt | 5 + .../native-wait.cc | 91 + test/2031-zygote-compiled-frame-deopt/run | 21 + .../src/Main.java 
| 22 + .../src/art/Redefinition.java | 1 + .../src/art/Test2031.java | 186 + .../expected.txt | 6 + .../info.txt | 7 + .../jasmin/Concrete1.j | 34 + .../jasmin/Concrete2.j | 34 + .../jasmin/Concrete3.j | 34 + .../src/Concrete2Base.java | 17 + .../src/IFace.java | 21 + .../src/Main.java | 33 + .../expected.txt | 3 + test/2035-structural-native-method/info.txt | 5 + test/2035-structural-native-method/run | 17 + .../src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test2035.java | 82 + .../src/Main.java | 21 + .../structural-native.cc | 46 + .../expected.txt | 8 + test/2036-structural-subclass-shadow/info.txt | 5 + test/2036-structural-subclass-shadow/run | 17 + .../src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 1 + .../src-art/art/Test2036.java | 136 + .../src/Main.java | 21 + test/2230-profile-save-hotness/expected.txt | 1 + test/2230-profile-save-hotness/info.txt | 1 + test/2230-profile-save-hotness/run | 21 + .../src-art/Main.java | 90 + test/300-package-override/expected.txt | 4 + test/300-package-override/info.txt | 2 + test/300-package-override/src/Main.java | 28 + .../src/p1/BaseClass.java | 23 + .../src/p2/DerivedClass.java | 22 + .../src/p2/DerivedClass2.java | 22 + test/301-abstract-protected/expected.txt | 1 + test/301-abstract-protected/info.txt | 3 + test/301-abstract-protected/src/Main.java | 33 + test/302-float-conversion/expected.txt | 3 + test/302-float-conversion/info.txt | 5 + test/302-float-conversion/src/Main.java | 64 + test/303-verification-stress/build | 24 + test/303-verification-stress/classes-gen.c | 64 + test/303-verification-stress/expected.txt | 42 + test/303-verification-stress/info.txt | 7 + test/303-verification-stress/src/Main.java | 27 + test/304-method-tracing/expected.txt | 0 test/304-method-tracing/info.txt | 1 + test/304-method-tracing/run | 18 + test/304-method-tracing/src/Main.java | 48 + test/305-other-fault-handler/expected.txt | 2 + test/305-other-fault-handler/fault_handler.cc | 
103 + test/305-other-fault-handler/info.txt | 3 + test/305-other-fault-handler/src/Main.java | 25 + test/370-dex-v37/build | 30 + test/370-dex-v37/expected.txt | 1 + test/370-dex-v37/info.txt | 1 + test/370-dex-v37/src/Main.java | 21 + test/401-optimizing-compiler/expected.txt | 14 + test/401-optimizing-compiler/info.txt | 2 + test/401-optimizing-compiler/src/Main.java | 243 + test/402-optimizing-control-flow/expected.txt | 0 test/402-optimizing-control-flow/info.txt | 1 + .../402-optimizing-control-flow/src/Main.java | 89 + test/403-optimizing-long/expected.txt | 1 + test/403-optimizing-long/info.txt | 1 + test/403-optimizing-long/src/Main.java | 115 + test/404-optimizing-allocator/expected.txt | 0 test/404-optimizing-allocator/info.txt | 1 + test/404-optimizing-allocator/src/Main.java | 166 + .../expected.txt | 0 test/405-optimizing-long-allocator/info.txt | 1 + .../src/Main.java | 172 + test/406-fields/expected.txt | 0 test/406-fields/info.txt | 0 test/406-fields/src/Main.java | 70 + test/406-fields/src/TestCase.java | 199 + test/407-arrays/expected.txt | 0 test/407-arrays/info.txt | 1 + test/407-arrays/src/Main.java | 162 + test/407-arrays/src/TestCase.java | 199 + test/408-move-bug/expected.txt | 0 test/408-move-bug/info.txt | 2 + test/408-move-bug/src/Main.java | 69 + test/409-materialized-condition/expected.txt | 5 + test/409-materialized-condition/info.txt | 1 + test/409-materialized-condition/src/Main.java | 153 + test/410-floats/expected.txt | 0 test/410-floats/info.txt | 1 + test/410-floats/src/Main.java | 140 + test/411-checker-hdiv-hrem-pow2/expected.txt | 0 test/411-checker-hdiv-hrem-pow2/info.txt | 2 + .../src/DivTest.java | 263 + test/411-checker-hdiv-hrem-pow2/src/Main.java | 22 + .../src/RemTest.java | 373 + test/411-optimizing-arith/expected.txt | 0 test/411-optimizing-arith/info.txt | 7 + test/411-optimizing-arith/src/DivTest.java | 250 + test/411-optimizing-arith/src/Main.java | 26 + test/411-optimizing-arith/src/MulTest.java | 180 + 
test/411-optimizing-arith/src/NegTest.java | 197 + test/411-optimizing-arith/src/RemTest.java | 205 + test/411-optimizing-arith/src/ShiftsTest.java | 330 + test/411-optimizing-arith/src/SubTest.java | 170 + test/412-new-array/expected.txt | 0 test/412-new-array/info.txt | 3 + .../412-new-array/smali/fill_array_data.smali | 108 + .../smali/filled_new_array.smali | 45 + .../smali/filled_new_array_verify_error.smali | 10 + test/412-new-array/src/Main.java | 525 + test/412-new-array/src/TestCase.java | 199 + test/413-regalloc-regression/expected.txt | 0 test/413-regalloc-regression/info.txt | 2 + test/413-regalloc-regression/src/Main.java | 41 + test/414-static-fields/expected.txt | 0 test/414-static-fields/info.txt | 1 + test/414-static-fields/src/Main.java | 98 + test/414-static-fields/src/Other.java | 19 + .../src/OtherWithClinit.java | 23 + test/414-static-fields/src/TestCase.java | 199 + test/416-optimizing-arith-not/expected.txt | 0 test/416-optimizing-arith-not/info.txt | 1 + test/416-optimizing-arith-not/smali/not.smali | 15 + test/416-optimizing-arith-not/src/Main.java | 79 + test/418-const-string/expected.txt | 2 + test/418-const-string/info.txt | 1 + test/418-const-string/src/Main.java | 28 + test/419-long-parameter/expected.txt | 0 test/419-long-parameter/info.txt | 3 + test/419-long-parameter/src/Main.java | 34 + test/420-const-class/expected.txt | 16 + test/420-const-class/info.txt | 1 + test/420-const-class/src/Main.java | 77 + test/421-exceptions/expected.txt | 20 + test/421-exceptions/info.txt | 1 + test/421-exceptions/src/Main.java | 65 + test/421-large-frame/expected.txt | 0 test/421-large-frame/info.txt | 1 + test/421-large-frame/src/Main.java | 1043 ++ test/422-instanceof/expected.txt | 0 test/422-instanceof/info.txt | 1 + test/422-instanceof/src/Main.java | 70 + test/422-type-conversion/expected.txt | 0 test/422-type-conversion/info.txt | 1 + test/422-type-conversion/src/Main.java | 695 ++ test/423-invoke-interface/expected.txt | 0 
test/423-invoke-interface/info.txt | 2 + test/423-invoke-interface/src/Main.java | 120 + test/424-checkcast/expected.txt | 0 test/424-checkcast/info.txt | 1 + test/424-checkcast/src/Main.java | 73 + test/425-invoke-super/expected.txt | 1 + test/425-invoke-super/info.txt | 1 + test/425-invoke-super/smali/invokesuper.smali | 40 + test/425-invoke-super/smali/subclass.smali | 29 + test/425-invoke-super/smali/superclass.smali | 29 + test/425-invoke-super/src/Main.java | 55 + test/426-monitor/expected.txt | 5 + test/426-monitor/info.txt | 1 + test/426-monitor/src/Main.java | 52 + test/427-bitwise/expected.txt | 0 test/427-bitwise/info.txt | 1 + test/427-bitwise/src/Main.java | 233 + test/427-bounds/expected.txt | 0 test/427-bounds/info.txt | 2 + test/427-bounds/src/Main.java | 51 + test/429-ssa-builder/expected.txt | 0 test/429-ssa-builder/info.txt | 3 + test/429-ssa-builder/src/Main.java | 49 + test/430-live-register-slow-path/expected.txt | 0 test/430-live-register-slow-path/info.txt | 2 + .../430-live-register-slow-path/src/Main.java | 39 + test/431-type-propagation/expected.txt | 1 + test/431-type-propagation/info.txt | 2 + .../smali/TypePropagation.smali | 43 + test/431-type-propagation/src/Main.java | 28 + test/432-optimizing-cmp/expected.txt | 0 test/432-optimizing-cmp/info.txt | 1 + test/432-optimizing-cmp/smali/cmp.smali | 33 + test/432-optimizing-cmp/src/Main.java | 266 + test/433-gvn/expected.txt | 1 + test/433-gvn/info.txt | 3 + test/433-gvn/src/Main.java | 38 + test/434-invoke-direct/expected.txt | 0 test/434-invoke-direct/info.txt | 2 + test/434-invoke-direct/smali/invoke.smali | 30 + .../src/InvokeDirectSuper.java | 23 + test/434-invoke-direct/src/Main.java | 40 + test/434-shifter-operand/expected.txt | 3 + test/434-shifter-operand/info.txt | 2 + test/434-shifter-operand/src/Main.java | 27 + test/435-new-instance/expected.txt | 0 test/435-new-instance/info.txt | 7 + test/435-new-instance/smali/instance.smali | 55 + test/435-new-instance/src/Main.java | 46 
+ test/435-new-instance/src/TestClass.java | 21 + test/435-new-instance/src/TestInterface.java | 19 + .../src/pkg/ProtectedClass.java | 20 + .../expected.txt | 3 + test/435-try-finally-without-catch/info.txt | 26 + .../src/Main.java | 41 + test/436-rem-float/expected.txt | 0 test/436-rem-float/info.txt | 1 + test/436-rem-float/src/Main.java | 264 + test/436-shift-constant/expected.txt | 0 test/436-shift-constant/info.txt | 1 + test/436-shift-constant/src/Main.java | 42 + test/437-inline/expected.txt | 0 test/437-inline/info.txt | 1 + test/437-inline/src/Main.java | 103 + test/438-volatile/expected.txt | 0 test/438-volatile/info.txt | 1 + test/438-volatile/src/Main.java | 53 + test/439-npe/expected.txt | 124 + test/439-npe/info.txt | 2 + test/439-npe/src/Main.java | 902 ++ test/439-swap-double/expected.txt | 4 + test/439-swap-double/info.txt | 2 + test/439-swap-double/src/Main.java | 46 + test/440-stmp/expected.txt | 1 + test/440-stmp/info.txt | 3 + test/440-stmp/src/Main.java | 47 + test/441-checker-inliner/expected.txt | 0 test/441-checker-inliner/info.txt | 1 + test/441-checker-inliner/smali/Smali.smali | 97 + test/441-checker-inliner/src/Main.java | 251 + .../442-checker-constant-folding/expected.txt | 0 test/442-checker-constant-folding/info.txt | 1 + .../smali/TestCmp.smali | 470 + .../src/Main.java | 1722 +++ test/443-not-bool-inline/expected.txt | 1 + test/443-not-bool-inline/info.txt | 2 + test/443-not-bool-inline/src/Main.java | 31 + test/444-checker-nce/expected.txt | 0 test/444-checker-nce/info.txt | 1 + test/444-checker-nce/src/Main.java | 265 + test/445-checker-licm/expected.txt | 1 + test/445-checker-licm/info.txt | 1 + test/445-checker-licm/src/Main.java | 273 + test/446-checker-inliner2/expected.txt | 0 test/446-checker-inliner2/info.txt | 1 + test/446-checker-inliner2/src/Main.java | 72 + test/447-checker-inliner3/expected.txt | 0 test/447-checker-inliner3/info.txt | 1 + test/447-checker-inliner3/src/Main.java | 77 + 
test/448-multiple-returns/expected.txt | 0 test/448-multiple-returns/info.txt | 2 + .../smali/MultipleReturns.smali | 45 + test/448-multiple-returns/src/Main.java | 32 + test/449-checker-bce/expected.txt | 2 + test/449-checker-bce/info.txt | 1 + test/449-checker-bce/src/Main.java | 1925 ++++ test/450-checker-types/expected.txt | 0 test/450-checker-types/info.txt | 1 + test/450-checker-types/smali/Main2.smali | 55 + test/450-checker-types/src/Main.java | 767 ++ test/451-regression-add-float/expected.txt | 0 test/451-regression-add-float/info.txt | 2 + test/451-regression-add-float/src/Main.java | 72 + test/451-spill-splot/expected.txt | 6 + test/451-spill-splot/info.txt | 2 + test/451-spill-splot/src/Main.java | 96 + test/452-multiple-returns2/expected.txt | 0 test/452-multiple-returns2/info.txt | 2 + .../smali/MultipleReturns.smali | 40 + test/452-multiple-returns2/src/Main.java | 32 + test/453-not-byte/expected.txt | 0 test/453-not-byte/info.txt | 2 + test/453-not-byte/smali/NotByte.smali | 23 + test/453-not-byte/src/Main.java | 32 + test/454-get-vreg/expected.txt | 1 + test/454-get-vreg/get_vreg_jni.cc | 133 + test/454-get-vreg/info.txt | 1 + test/454-get-vreg/src/Main.java | 50 + test/455-checker-gvn/expected.txt | 3 + test/455-checker-gvn/info.txt | 1 + test/455-checker-gvn/smali/Smali.smali | 42 + test/455-checker-gvn/src/Main.java | 85 + test/456-baseline-array-set/expected.txt | 0 test/456-baseline-array-set/info.txt | 3 + test/456-baseline-array-set/src/Main.java | 33 + test/457-regs/expected.txt | 1 + test/457-regs/info.txt | 1 + test/457-regs/regs_jni.cc | 153 + test/457-regs/smali/PhiLiveness.smali | 82 + test/457-regs/src/Main.java | 43 + .../expected.txt | 0 .../info.txt | 1 + .../smali/SmaliTests.smali | 446 + .../smali/SmaliTests2.smali | 305 + .../src/Main.java | 2810 +++++ test/458-long-to-fpu/expected.txt | 2 + test/458-long-to-fpu/info.txt | 2 + test/458-long-to-fpu/src/Main.java | 49 + test/459-dead-phi/expected.txt | 1 + 
test/459-dead-phi/info.txt | 1 + test/459-dead-phi/smali/EquivalentPhi.smali | 41 + test/459-dead-phi/src/Main.java | 29 + test/460-multiple-returns3/expected.txt | 0 test/460-multiple-returns3/info.txt | 2 + .../smali/MultipleReturns.smali | 40 + test/460-multiple-returns3/src/Main.java | 32 + test/461-get-reference-vreg/expected.txt | 1 + .../get_reference_vreg_jni.cc | 101 + test/461-get-reference-vreg/info.txt | 1 + test/461-get-reference-vreg/src/Main.java | 62 + .../expected.txt | 0 test/462-checker-inlining-dex-files/info.txt | 1 + .../src-multidex/OtherDex.java | 64 + .../src/Main.java | 206 + .../expected.txt | 0 test/463-checker-boolean-simplifier/info.txt | 1 + .../smali/Main2.smali | 308 + .../src-art/Main.java | 280 + .../src/Main.java | 22 + .../expected.txt | 0 .../464-checker-inline-sharpen-calls/info.txt | 1 + .../src/Main.java | 64 + test/465-checker-clinit-gvn/expected.txt | 0 test/465-checker-clinit-gvn/info.txt | 1 + test/465-checker-clinit-gvn/src/Main.java | 78 + test/466-get-live-vreg/expected.txt | 1 + test/466-get-live-vreg/get_live_vreg_jni.cc | 90 + test/466-get-live-vreg/info.txt | 3 + test/466-get-live-vreg/src/Main.java | 97 + test/467-regalloc-pair/expected.txt | 1 + test/467-regalloc-pair/info.txt | 2 + test/467-regalloc-pair/smali/TestCase.smali | 59 + test/467-regalloc-pair/src/Main.java | 37 + .../expected.txt | 0 .../info.txt | 2 + .../smali/TestCase.smali | 47 + .../src/Main.java | 38 + .../expected.txt | 0 test/469-condition-materialization/info.txt | 2 + .../src/Main.java | 48 + test/470-huge-method/expected.txt | 0 test/470-huge-method/info.txt | 1 + test/470-huge-method/src/Main.java | 2033 ++++ test/471-deopt-environment/expected.txt | 0 test/471-deopt-environment/info.txt | 3 + test/471-deopt-environment/src/Main.java | 47 + test/471-uninitialized-locals/expected.txt | 0 test/471-uninitialized-locals/info.txt | 2 + .../471-uninitialized-locals/smali/Test.smali | 23 + test/471-uninitialized-locals/src/Main.java | 37 + 
test/472-type-propagation/expected.txt | 2 + test/472-type-propagation/info.txt | 3 + test/472-type-propagation/src/Main.java | 33 + .../expected.txt | 3 + test/472-unreachable-if-regression/info.txt | 3 + .../smali/Test.smali | 46 + .../src/Main.java | 37 + .../expected.txt | 0 test/473-checker-inliner-constants/info.txt | 1 + .../src/Main.java | 76 + test/473-remove-dead-block/expected.txt | 1 + test/473-remove-dead-block/info.txt | 3 + test/473-remove-dead-block/src/Main.java | 42 + test/474-checker-boolean-input/expected.txt | 0 test/474-checker-boolean-input/info.txt | 1 + test/474-checker-boolean-input/src/Main.java | 108 + test/474-fp-sub-neg/expected.txt | 13 + test/474-fp-sub-neg/info.txt | 11 + test/474-fp-sub-neg/src/Main.java | 74 + test/475-regression-inliner-ids/expected.txt | 0 test/475-regression-inliner-ids/info.txt | 2 + .../smali/TestCase.smali | 76 + test/475-regression-inliner-ids/src/Main.java | 33 + test/475-simplify-mul-zero/expected.txt | 1 + test/475-simplify-mul-zero/info.txt | 2 + test/475-simplify-mul-zero/src/Main.java | 28 + .../expected.txt | 0 .../info.txt | 2 + .../src/Main.java | 844 ++ .../expected.txt | 0 test/476-checker-ctor-memory-barrier/info.txt | 2 + .../src/Main.java | 276 + .../expected.txt | 2 + test/476-clinit-inline-static-invoke/info.txt | 3 + .../src/Main.java | 47 + test/477-checker-bound-type/expected.txt | 0 test/477-checker-bound-type/info.txt | 3 + test/477-checker-bound-type/src/Main.java | 135 + .../expected.txt | 0 .../info.txt | 1 + .../src/Main.java | 40 + .../expected.txt | 17 + .../478-checker-clinit-check-pruning/info.txt | 3 + .../src/Main.java | 685 ++ test/478-checker-inline-noreturn/expected.txt | 0 test/478-checker-inline-noreturn/info.txt | 3 + .../478-checker-inline-noreturn/src/Main.java | 60 + .../expected.txt | 0 test/478-checker-inliner-nested-loop/info.txt | 2 + .../src/Main.java | 57 + .../expected.txt | 0 .../info.txt | 2 + .../src/Main.java | 50 + test/480-checker-dead-blocks/expected.txt 
| 0 test/480-checker-dead-blocks/info.txt | 1 + .../480-checker-dead-blocks/smali/Smali.smali | 122 + test/480-checker-dead-blocks/src/Main.java | 155 + test/481-regression-phi-cond/expected.txt | 0 test/481-regression-phi-cond/info.txt | 2 + test/481-regression-phi-cond/src/Main.java | 51 + .../expected.txt | 0 test/482-checker-loop-back-edge-use/info.txt | 2 + .../src/Main.java | 204 + test/483-dce-block/expected.txt | 1 + test/483-dce-block/info.txt | 2 + test/483-dce-block/src/Main.java | 58 + test/484-checker-register-hints/expected.txt | 0 test/484-checker-register-hints/info.txt | 4 + .../smali/Smali.smali | 143 + test/484-checker-register-hints/src/Main.java | 137 + test/485-checker-dce-loop-update/expected.txt | 0 test/485-checker-dce-loop-update/info.txt | 2 + .../smali/TestCase.smali | 288 + .../485-checker-dce-loop-update/src/Main.java | 27 + test/485-checker-dce-switch/expected.txt | 0 test/485-checker-dce-switch/info.txt | 1 + test/485-checker-dce-switch/src/Main.java | 192 + .../expected.txt | 0 test/486-checker-must-do-null-check/info.txt | 1 + .../src/Main.java | 53 + test/487-checker-inline-calls/expected.txt | 6 + test/487-checker-inline-calls/info.txt | 1 + test/487-checker-inline-calls/src/Main.java | 50 + .../expected.txt | 7 + .../info.txt | 1 + .../src/Main.java | 53 + .../expected.txt | 1 + test/489-current-method-regression/info.txt | 2 + .../src/Main.java | 34 + test/490-checker-inline/expected.txt | 0 test/490-checker-inline/info.txt | 1 + test/490-checker-inline/src/Main.java | 52 + test/491-current-method/expected.txt | 0 test/491-current-method/info.txt | 2 + test/491-current-method/src/Main.java | 70 + .../expected.txt | 5 + .../info.txt | 1 + .../src/Main.java | 52 + .../expected.txt | 5 + .../info.txt | 2 + .../src/Main.java | 48 + .../494-checker-instanceof-tests/expected.txt | 0 test/494-checker-instanceof-tests/info.txt | 1 + .../src/Main.java | 223 + test/495-checker-checkcast-tests/expected.txt | 0 
test/495-checker-checkcast-tests/info.txt | 1 + .../495-checker-checkcast-tests/src/Main.java | 231 + .../expected.txt | 4 + .../info.txt | 2 + .../src/FirstSeenByMyClassLoader.java | 26 + .../src/Main.java | 134 + .../clear_dex_cache.cc | 96 + .../expected.txt | 8 + test/497-inlining-and-class-loader/info.txt | 2 + .../src/Level1.java | 27 + .../src/Main.java | 130 + test/498-type-propagation/expected.txt | 1 + test/498-type-propagation/info.txt | 2 + .../smali/TypePropagation.smali | 30 + test/498-type-propagation/src/Main.java | 29 + test/499-bce-phi-array-length/expected.txt | 0 test/499-bce-phi-array-length/info.txt | 2 + test/499-bce-phi-array-length/src/Main.java | 64 + test/500-instanceof/expected.txt | 0 test/500-instanceof/info.txt | 2 + test/500-instanceof/src/Main.java | 32 + test/501-null-constant-dce/expected.txt | 1 + test/501-null-constant-dce/info.txt | 1 + test/501-null-constant-dce/smali/DCE.smali | 37 + test/501-null-constant-dce/src/Main.java | 32 + .../501-regression-packed-switch/expected.txt | 0 test/501-regression-packed-switch/info.txt | 4 + .../smali/Test.smali | 54 + .../src/Main.java | 38 + test/503-dead-instructions/expected.txt | 1 + test/503-dead-instructions/info.txt | 2 + .../smali/DeadInstructions.smali | 63 + test/503-dead-instructions/src/Main.java | 40 + .../expected.txt | 0 test/504-regression-baseline-entry/info.txt | 2 + .../smali/Test.smali | 30 + .../src/Main.java | 33 + .../expected.txt | 0 test/505-simplifier-type-propagation/info.txt | 3 + .../src/Main.java | 38 + test/506-verify-aput/expected.txt | 1 + test/506-verify-aput/info.txt | 2 + test/506-verify-aput/smali/VerifyAPut1.smali | 26 + test/506-verify-aput/smali/VerifyAPut2.smali | 25 + test/506-verify-aput/src/Main.java | 34 + test/507-boolean-test/expected.txt | 0 test/507-boolean-test/info.txt | 2 + test/507-boolean-test/src/Main.java | 27 + test/507-referrer/expected.txt | 0 test/507-referrer/info.txt | 2 + test/507-referrer/src/Main.java | 26 + 
test/507-referrer/src/p1/InPackage.java | 25 + test/508-checker-disassembly/expected.txt | 0 test/508-checker-disassembly/info.txt | 1 + test/508-checker-disassembly/src/Main.java | 29 + test/508-referrer-method/expected.txt | 0 test/508-referrer-method/info.txt | 2 + test/508-referrer-method/src/Main.java | 26 + .../508-referrer-method/src/p1/InPackage.java | 26 + .../src/p1/PackagePrivateA.java | 23 + test/508-referrer-method/src/p1/PublicB.java | 20 + test/508-referrer-method/src/p1/PublicC.java | 20 + test/509-pre-header/expected.txt | 1 + test/509-pre-header/info.txt | 3 + test/509-pre-header/smali/PreHeader.smali | 39 + test/509-pre-header/src/Main.java | 28 + test/510-checker-try-catch/expected.txt | 0 test/510-checker-try-catch/info.txt | 1 + .../510-checker-try-catch/smali/Builder.smali | 1387 +++ .../smali/RegisterAllocator.smali | 94 + .../510-checker-try-catch/smali/Runtime.smali | 625 ++ .../smali/SsaBuilder.smali | 232 + test/510-checker-try-catch/src/Main.java | 187 + test/511-clinit-interface/expected.txt | 1 + test/511-clinit-interface/info.txt | 2 + .../smali/BogusInterface.smali | 23 + test/511-clinit-interface/src/Main.java | 28 + test/513-array-deopt/expected.txt | 0 test/513-array-deopt/info.txt | 2 + test/513-array-deopt/src/Main.java | 54 + test/514-shifts/expected.txt | 0 test/514-shifts/info.txt | 2 + test/514-shifts/src/Main.java | 106 + test/515-dce-dominator/expected.txt | 1 + test/515-dce-dominator/info.txt | 3 + test/515-dce-dominator/smali/Dominator.smali | 37 + test/515-dce-dominator/src/Main.java | 28 + test/516-dead-move-result/expected.txt | 0 test/516-dead-move-result/info.txt | 3 + .../smali/MoveResult.smali | 25 + test/516-dead-move-result/src/Main.java | 29 + .../expected.txt | 0 test/517-checker-builder-fallthrough/info.txt | 2 + .../smali/TestCase.smali | 67 + .../src/Main.java | 32 + test/518-null-array-get/expected.txt | 6 + test/518-null-array-get/info.txt | 9 + .../smali/NullArrayFailInt2Object.smali | 28 + 
.../smali/NullArrayFailObject2Int.smali | 28 + .../smali/NullArraySuccessInt.smali | 33 + .../smali/NullArraySuccessInt2Float.smali | 33 + .../smali/NullArraySuccessRef.smali | 33 + .../smali/NullArraySuccessShort.smali | 33 + test/518-null-array-get/src/Main.java | 57 + test/519-bound-load-class/expected.txt | 0 test/519-bound-load-class/info.txt | 3 + test/519-bound-load-class/src/Main.java | 39 + test/520-equivalent-phi/expected.txt | 0 test/520-equivalent-phi/info.txt | 2 + .../520-equivalent-phi/smali/Equivalent.smali | 35 + test/520-equivalent-phi/src/Main.java | 30 + test/521-checker-array-set-null/expected.txt | 0 test/521-checker-array-set-null/info.txt | 2 + test/521-checker-array-set-null/src/Main.java | 41 + .../expected.txt | 0 .../521-regression-integer-field-set/info.txt | 3 + .../src/Main.java | 58 + .../expected.txt | 0 .../info.txt | 3 + .../smali/Test.smali | 40 + .../src/Main.java | 84 + .../expected.txt | 0 .../523-checker-can-throw-regression/info.txt | 2 + .../smali/Test.smali | 55 + .../src/Main.java | 35 + .../expected.txt | 1 + .../info.txt | 1 + .../src/Main.java | 37 + test/525-checker-arrays-fields1/expected.txt | 1 + test/525-checker-arrays-fields1/info.txt | 1 + test/525-checker-arrays-fields1/src/Main.java | 711 ++ test/525-checker-arrays-fields2/expected.txt | 1 + test/525-checker-arrays-fields2/info.txt | 1 + test/525-checker-arrays-fields2/src/Main.java | 711 ++ .../expected.txt | 0 test/526-checker-caller-callee-regs/info.txt | 1 + .../src/Main.java | 89 + test/526-long-regalloc/expected.txt | 0 test/526-long-regalloc/info.txt | 2 + test/526-long-regalloc/src/Main.java | 72 + .../expected.txt | 0 test/527-checker-array-access-simd/info.txt | 1 + .../src/Main.java | 223 + .../expected.txt | 0 test/527-checker-array-access-split/info.txt | 1 + .../src/Main.java | 675 ++ test/528-long-hint/expected.txt | 0 test/528-long-hint/info.txt | 2 + test/528-long-hint/src/Main.java | 45 + test/529-checker-unresolved/expected.txt | 10 + 
test/529-checker-unresolved/info.txt | 1 + test/529-checker-unresolved/run | 18 + .../UnresolvedClass.java | 50 + .../UnresolvedInterface.java | 19 + .../UnresolvedSuperClass.java | 21 + test/529-checker-unresolved/src/Main.java | 255 + test/529-long-split/expected.txt | 0 test/529-long-split/info.txt | 2 + test/529-long-split/src/Main.java | 185 + test/530-checker-loops1/expected.txt | 1 + test/530-checker-loops1/info.txt | 1 + test/530-checker-loops1/src/Main.java | 822 ++ test/530-checker-loops2/expected.txt | 1 + test/530-checker-loops2/info.txt | 1 + test/530-checker-loops2/src/Main.java | 1257 +++ test/530-checker-loops3/expected.txt | 1 + test/530-checker-loops3/info.txt | 1 + test/530-checker-loops3/src/Main.java | 412 + test/530-checker-loops4/expected.txt | 1 + test/530-checker-loops4/info.txt | 1 + test/530-checker-loops4/src/Main.java | 371 + test/530-checker-loops5/expected.txt | 1 + test/530-checker-loops5/info.txt | 1 + test/530-checker-loops5/src/Main.java | 186 + test/530-checker-lse-ctor-fences/expected.txt | 0 test/530-checker-lse-ctor-fences/info.txt | 1 + .../smali/Smali.smali | 97 + .../530-checker-lse-ctor-fences/src/Main.java | 196 + test/530-checker-lse-simd/expected.txt | 7 + test/530-checker-lse-simd/info.txt | 1 + test/530-checker-lse-simd/src/Main.java | 267 + test/530-checker-lse/expected.txt | 2 + test/530-checker-lse/info.txt | 1 + test/530-checker-lse/smali/Main.smali | 292 + test/530-checker-lse/src/Main.java | 1292 +++ test/530-checker-lse2/expected.txt | 8 + test/530-checker-lse2/info.txt | 2 + test/530-checker-lse2/src/Main.java | 224 + test/530-checker-lse3/expected.txt | 0 test/530-checker-lse3/info.txt | 4 + test/530-checker-lse3/smali/StoreLoad.smali | 62 + test/530-checker-lse3/src/Main.java | 48 + test/530-checker-peel-unroll/expected.txt | 1 + test/530-checker-peel-unroll/info.txt | 1 + .../smali/PeelUnroll.smali | 232 + test/530-checker-peel-unroll/src/Main.java | 1082 ++ .../expected.txt | 0 .../info.txt | 1 + 
.../smali/TestCase.smali | 59 + .../src/Main.java | 66 + test/530-instanceof-checkcast/expected.txt | 0 test/530-instanceof-checkcast/info.txt | 1 + test/530-instanceof-checkcast/src/Main.java | 248 + test/530-regression-lse/expected.txt | 0 test/530-regression-lse/info.txt | 2 + test/530-regression-lse/src/Main.java | 55 + test/531-regression-debugphi/expected.txt | 0 test/531-regression-debugphi/info.txt | 2 + .../smali/TestCase.smali | 121 + test/531-regression-debugphi/src/Main.java | 22 + .../532-checker-nonnull-arrayset/expected.txt | 0 test/532-checker-nonnull-arrayset/info.txt | 1 + .../src/Main.java | 41 + test/533-regression-debugphi/expected.txt | 0 test/533-regression-debugphi/info.txt | 2 + .../smali/TestCase.smali | 72 + test/533-regression-debugphi/src/Main.java | 22 + .../expected.txt | 5 + test/534-checker-bce-deoptimization/info.txt | 8 + .../src/Main.java | 131 + test/535-deopt-and-inlining/expected.txt | 0 test/535-deopt-and-inlining/info.txt | 2 + test/535-deopt-and-inlining/src/Main.java | 55 + test/535-regression-const-val/expected.txt | 0 test/535-regression-const-val/info.txt | 2 + .../smali/TestCase.smali | 36 + test/535-regression-const-val/src/Main.java | 22 + .../expected.txt | 0 .../info.txt | 0 .../smali/SmaliTests.smali | 102 + .../src/Main.java | 466 + .../expected.txt | 4 + test/536-checker-needs-access-check/info.txt | 1 + .../src/Main.java | 82 + .../src/other/InaccessibleClass.java | 20 + .../src/other/InaccessibleClassProxy.java | 23 + .../src2/other/InaccessibleClass.java | 20 + .../src2/other/InaccessibleClassProxy.java | 23 + test/537-checker-arraycopy/expected.txt | 0 test/537-checker-arraycopy/info.txt | 1 + test/537-checker-arraycopy/src/Main.java | 100 + test/537-checker-debuggable/expected.txt | 0 test/537-checker-debuggable/info.txt | 1 + .../smali/TestCase.smali | 42 + test/537-checker-debuggable/src/Main.java | 23 + .../expected.txt | 0 .../info.txt | 1 + .../src/Main.java | 59 + .../src/other/InaccessibleClass.java 
| 20 + .../src2/other/InaccessibleClass.java | 20 + test/537-checker-jump-over-jump/expected.txt | 0 test/537-checker-jump-over-jump/info.txt | 1 + test/537-checker-jump-over-jump/src/Main.java | 56 + test/538-checker-embed-constants/expected.txt | 0 test/538-checker-embed-constants/info.txt | 1 + .../538-checker-embed-constants/src/Main.java | 753 ++ test/540-checker-rtp-bug/expected.txt | 1 + test/540-checker-rtp-bug/info.txt | 1 + test/540-checker-rtp-bug/src/Main.java | 102 + .../541-regression-inlined-deopt/expected.txt | 0 test/541-regression-inlined-deopt/info.txt | 4 + .../smali/TestCase.smali | 55 + .../src/Main.java | 36 + test/542-bitfield-rotates/expected.txt | 0 test/542-bitfield-rotates/info.txt | 1 + test/542-bitfield-rotates/src/Main.java | 423 + test/542-inline-trycatch/expected.txt | 0 test/542-inline-trycatch/info.txt | 1 + test/542-inline-trycatch/src/Main.java | 178 + test/542-unresolved-access-check/expected.txt | 0 test/542-unresolved-access-check/info.txt | 1 + .../542-unresolved-access-check/src/Main.java | 104 + .../src/p1/InP1.java | 93 + .../src/p1/OtherInP1.java | 32 + .../src/p1/PlaceHolder.java | 24 + test/543-checker-dce-trycatch/expected.txt | 0 test/543-checker-dce-trycatch/info.txt | 1 + .../smali/TestCase.smali | 333 + test/543-checker-dce-trycatch/src/Main.java | 66 + test/543-env-long-ref/env_long_ref.cc | 55 + test/543-env-long-ref/expected.txt | 2 + test/543-env-long-ref/info.txt | 3 + test/543-env-long-ref/smali/TestCase.smali | 26 + test/543-env-long-ref/src/Main.java | 42 + test/545-tracing-and-jit/expected.txt | 0 test/545-tracing-and-jit/info.txt | 2 + test/545-tracing-and-jit/src/Main.java | 251 + .../expected.txt | 0 test/546-regression-simplify-catch/info.txt | 2 + .../smali/TestCase.smali | 104 + .../src/Main.java | 24 + .../expected.txt | 0 .../info.txt | 2 + .../smali/TestCase.smali | 57 + .../src/Main.java | 24 + .../548-checker-inlining-and-dce/expected.txt | 0 test/548-checker-inlining-and-dce/info.txt | 1 + 
.../src/Main.java | 87 + test/549-checker-types-merge/expected.txt | 0 test/549-checker-types-merge/info.txt | 1 + test/549-checker-types-merge/src/Main.java | 130 + .../expected.txt | 0 test/550-checker-multiply-accumulate/info.txt | 1 + .../src/Main.java | 570 + .../expected.txt | 0 .../info.txt | 3 + .../smali/TestCase.smali | 82 + .../src/Main.java | 40 + test/550-new-instance-clinit/expected.txt | 0 test/550-new-instance-clinit/info.txt | 3 + test/550-new-instance-clinit/src/Main.java | 33 + test/551-checker-clinit/expected.txt | 0 test/551-checker-clinit/info.txt | 1 + test/551-checker-clinit/src/Main.java | 93 + test/551-checker-shifter-operand/expected.txt | 0 test/551-checker-shifter-operand/info.txt | 1 + .../551-checker-shifter-operand/src/Main.java | 1128 ++ test/551-implicit-null-checks/expected.txt | 4 + test/551-implicit-null-checks/info.txt | 1 + test/551-implicit-null-checks/src/Main.java | 68 + test/551-invoke-super/expected.txt | 0 test/551-invoke-super/info.txt | 1 + test/551-invoke-super/smali/invokesuper.smali | 40 + test/551-invoke-super/smali/superclass.smali | 26 + test/551-invoke-super/src/Main.java | 36 + .../expected.txt | 0 test/552-checker-primitive-typeprop/info.txt | 2 + .../smali/ArrayGet.smali | 245 + .../smali/ArraySet.smali | 51 + .../smali/SsaBuilder.smali | 52 + .../smali/TypePropagation.smali | 136 + .../src/Main.java | 43 + test/552-checker-sharpening/expected.txt | 0 test/552-checker-sharpening/info.txt | 1 + test/552-checker-sharpening/src/Main.java | 226 + .../expected.txt | 0 .../info.txt | 1 + .../src/Main.java | 225 + .../expected.txt | 0 test/552-invoke-non-existent-super/info.txt | 1 + .../smali/invokesuper.smali | 40 + .../smali/superclass.smali | 23 + .../src/Main.java | 36 + test/553-invoke-super/expected.txt | 0 test/553-invoke-super/info.txt | 1 + test/553-invoke-super/smali/invokesuper.smali | 40 + test/553-invoke-super/src/Main.java | 31 + test/553-invoke-super/src/SuperClass.java | 26 + 
test/554-checker-rtp-checkcast/expected.txt | 0 test/554-checker-rtp-checkcast/info.txt | 1 + test/554-checker-rtp-checkcast/src/Main.java | 73 + .../555-UnsafeGetLong-regression/expected.txt | 1 + test/555-UnsafeGetLong-regression/info.txt | 2 + .../src/Main.java | 52 + test/556-invoke-super/expected.txt | 0 test/556-invoke-super/info.txt | 1 + test/556-invoke-super/smali/invokesuper.smali | 40 + .../src-multidex/SuperClass.java | 21 + test/556-invoke-super/src/Main.java | 35 + .../expected.txt | 0 .../info.txt | 1 + .../src/Main.java | 663 ++ test/557-checker-ref-equivalent/expected.txt | 0 test/557-checker-ref-equivalent/info.txt | 1 + .../smali/TestCase.smali | 51 + test/557-checker-ref-equivalent/src/Main.java | 47 + test/558-switch/expected.txt | 0 test/558-switch/info.txt | 2 + test/558-switch/src/Main.java | 35 + test/559-bce-ssa/expected.txt | 0 test/559-bce-ssa/info.txt | 2 + test/559-bce-ssa/src/Main.java | 32 + .../559-checker-irreducible-loop/expected.txt | 7 + test/559-checker-irreducible-loop/info.txt | 1 + .../smali/IrreducibleLoop.smali | 549 + .../src/Main.java | 72 + test/559-checker-rtp-ifnotnull/expected.txt | 0 test/559-checker-rtp-ifnotnull/info.txt | 2 + test/559-checker-rtp-ifnotnull/src/Main.java | 54 + test/560-packed-switch/expected.txt | 0 test/560-packed-switch/info.txt | 2 + test/560-packed-switch/src/Main.java | 31 + test/561-divrem/expected.txt | 0 test/561-divrem/info.txt | 2 + test/561-divrem/src/Main.java | 103 + test/561-shared-slowpaths/expected.txt | 1 + test/561-shared-slowpaths/info.txt | 1 + test/561-shared-slowpaths/src/Main.java | 154 + test/562-bce-preheader/expected.txt | 1 + test/562-bce-preheader/info.txt | 1 + test/562-bce-preheader/src/Main.java | 152 + test/562-checker-no-intermediate/expected.txt | 0 test/562-checker-no-intermediate/info.txt | 2 + .../562-checker-no-intermediate/src/Main.java | 101 + test/563-checker-fakestring/expected.txt | 1 + test/563-checker-fakestring/info.txt | 2 + .../smali/TestCase.smali 
| 475 + test/563-checker-fakestring/src/Main.java | 172 + test/563-checker-invoke-super/expected.txt | 0 test/563-checker-invoke-super/info.txt | 2 + test/563-checker-invoke-super/src/Main.java | 39 + test/564-checker-bitcount/expected.txt | 1 + test/564-checker-bitcount/info.txt | 1 + test/564-checker-bitcount/src/Main.java | 206 + test/564-checker-inline-loop/expected.txt | 0 test/564-checker-inline-loop/info.txt | 1 + test/564-checker-inline-loop/src/Main.java | 64 + .../564-checker-irreducible-loop/expected.txt | 1 + test/564-checker-irreducible-loop/info.txt | 2 + .../smali/IrreducibleLoop.smali | 60 + .../src/Main.java | 29 + test/564-checker-negbitwise/expected.txt | 0 test/564-checker-negbitwise/info.txt | 1 + test/564-checker-negbitwise/src/Main.java | 300 + .../expected.txt | 0 test/565-checker-condition-liveness/info.txt | 1 + .../src/Main.java | 194 + .../565-checker-doublenegbitwise/expected.txt | 0 test/565-checker-doublenegbitwise/info.txt | 1 + .../smali/SmaliTests.smali | 993 ++ .../src/Main.java | 73 + .../565-checker-irreducible-loop/expected.txt | 2 + test/565-checker-irreducible-loop/info.txt | 2 + .../smali/IrreducibleLoop.smali | 101 + .../src/Main.java | 37 + test/565-checker-rotate/expected.txt | 1 + test/565-checker-rotate/info.txt | 1 + test/565-checker-rotate/smali/Main2.smali | 163 + test/565-checker-rotate/src-art/Main.java | 534 + test/565-checker-rotate/src/Main.java | 22 + test/566-checker-codegen-select/expected.txt | 0 test/566-checker-codegen-select/info.txt | 1 + test/566-checker-codegen-select/src/Main.java | 98 + test/566-checker-signum/expected.txt | 1 + test/566-checker-signum/info.txt | 1 + test/566-checker-signum/smali/Main2.smali | 82 + test/566-checker-signum/src-art/Main.java | 196 + test/566-checker-signum/src/Main.java | 22 + test/566-polymorphic-inlining/expected.txt | 1 + test/566-polymorphic-inlining/info.txt | 1 + .../polymorphic_inline.cc | 91 + test/566-polymorphic-inlining/run | 20 + 
test/566-polymorphic-inlining/src/Main.java | 156 + test/567-checker-compare/expected.txt | 1 + test/567-checker-compare/info.txt | 1 + test/567-checker-compare/smali/Smali.smali | 91 + test/567-checker-compare/src/Main.java | 929 ++ test/568-checker-onebit/expected.txt | 1 + test/568-checker-onebit/info.txt | 1 + test/568-checker-onebit/src/Main.java | 103 + .../expected.txt | 0 test/569-checker-pattern-replacement/info.txt | 1 + test/569-checker-pattern-replacement/run | 18 + .../src-multidex/Base.java | 70 + .../src-multidex/BaseWithFinalField.java | 27 + .../src-multidex/Derived.java | 63 + .../src-multidex/DerivedInSecondDex.java | 31 + .../src-multidex/DerivedWithFinalField.java | 38 + .../src-multidex/Second.java | 103 + .../src/BaseInMainDex.java | 26 + .../src/Main.java | 1259 +++ test/570-checker-osr-locals/expected.txt | 1 + test/570-checker-osr-locals/info.txt | 0 test/570-checker-osr-locals/run | 18 + .../smali/WeirdLoop.smali | 39 + test/570-checker-osr-locals/src/Main.java | 123 + test/570-checker-osr/expected.txt | 6 + test/570-checker-osr/info.txt | 0 test/570-checker-osr/osr.cc | 138 + test/570-checker-osr/run | 18 + test/570-checker-osr/smali/Osr.smali | 35 + .../src/DeoptimizationController.java | 104 + test/570-checker-osr/src/Main.java | 320 + test/570-checker-select/expected.txt | 0 test/570-checker-select/info.txt | 1 + test/570-checker-select/src/Main.java | 715 ++ test/571-irreducible-loop/expected.txt | 1 + test/571-irreducible-loop/info.txt | 2 + .../smali/IrreducibleLoop.smali | 47 + test/571-irreducible-loop/src/Main.java | 29 + .../expected.txt | 1 + .../572-checker-array-get-regression/info.txt | 3 + .../src/Main.java | 59 + .../expected.txt | 1 + .../573-checker-checkcast-regression/info.txt | 4 + .../src/Main.java | 49 + .../expected.txt | 0 .../info.txt | 3 + test/574-irreducible-and-constant-area/run | 18 + .../smali/IrreducibleLoop.smali | 35 + .../src/Main.java | 41 + test/575-checker-isnan/expected.txt | 1 + 
test/575-checker-isnan/info.txt | 1 + test/575-checker-isnan/src/Main.java | 126 + .../expected.txt | 1 + test/575-checker-string-init-alias/info.txt | 2 + .../smali/TestCase.smali | 72 + .../src/Main.java | 73 + test/576-polymorphic-inlining/expected.txt | 0 test/576-polymorphic-inlining/info.txt | 1 + test/576-polymorphic-inlining/src/Main.java | 103 + test/577-checker-fp2int/expected.txt | 1 + test/577-checker-fp2int/info.txt | 1 + test/577-checker-fp2int/src/Main.java | 124 + test/578-bce-visit/expected.txt | 2 + test/578-bce-visit/info.txt | 1 + test/578-bce-visit/src/Main.java | 60 + test/578-polymorphic-inlining/expected.txt | 0 test/578-polymorphic-inlining/info.txt | 2 + test/578-polymorphic-inlining/src/Main.java | 56 + test/579-inline-infinite/expected.txt | 0 test/579-inline-infinite/info.txt | 2 + test/579-inline-infinite/src/Main.java | 38 + test/580-checker-round/expected.txt | 1 + test/580-checker-round/info.txt | 1 + test/580-checker-round/src/Main.java | 210 + .../expected.txt | 3 + .../info.txt | 1 + .../src-art/Main.java | 74 + test/580-crc32/expected.txt | 0 test/580-crc32/info.txt | 1 + test/580-crc32/src/Main.java | 536 + test/580-fp16/expected.txt | 0 test/580-fp16/info.txt | 1 + test/580-fp16/src-art/Main.java | 360 + test/581-rtp/expected.txt | 0 test/581-rtp/info.txt | 2 + test/581-rtp/src/Main.java | 44 + test/582-checker-bce-length/expected.txt | 1 + test/582-checker-bce-length/info.txt | 1 + test/582-checker-bce-length/src/Main.java | 99 + test/583-checker-zero/expected.txt | 0 test/583-checker-zero/info.txt | 2 + test/583-checker-zero/src/Main.java | 30 + test/584-checker-div-bool/expected.txt | 0 test/584-checker-div-bool/info.txt | 2 + test/584-checker-div-bool/src/Main.java | 41 + test/585-inline-unresolved/expected.txt | 0 test/585-inline-unresolved/info.txt | 2 + .../smali/TestCase.smali | 48 + test/585-inline-unresolved/src/Main.java | 22 + test/586-checker-null-array-get/expected.txt | 0 test/586-checker-null-array-get/info.txt 
| 3 + .../smali/SmaliTests.smali | 167 + test/586-checker-null-array-get/src/Main.java | 143 + test/587-inline-class-error/expected.txt | 0 test/587-inline-class-error/info.txt | 2 + .../smali/SuperVerifyError.smali | 27 + .../smali/TestCase.smali | 33 + .../smali/VerifyError.smali | 28 + test/587-inline-class-error/src/Main.java | 37 + .../expected.txt | 2 + .../info.txt | 3 + .../smali/IrreducibleLoop.smali | 116 + .../src/Main.java | 36 + test/589-super-imt/expected.txt | 0 test/589-super-imt/info.txt | 2 + test/589-super-imt/src/Main.java | 447 + .../expected.txt | 1 + .../info.txt | 11 + .../src/Main.java | 68 + test/590-infinite-loop-with-nop/expected.txt | 0 test/590-infinite-loop-with-nop/info.txt | 1 + .../smali/TestCase.smali | 28 + test/590-infinite-loop-with-nop/src/Main.java | 21 + .../expected.txt | 0 .../591-checker-regression-dead-loop/info.txt | 2 + .../src/Main.java | 35 + test/591-new-instance-string/expected.txt | 1 + test/591-new-instance-string/info.txt | 1 + .../smali/new-instance.smali | 29 + test/591-new-instance-string/src/Main.java | 28 + .../expected.txt | 0 .../info.txt | 2 + .../smali/TestCase.smali | 50 + .../src/Main.java | 61 + .../expected.txt | 1 + .../info.txt | 1 + .../smali/SmaliTests.smali | 250 + .../src/Main.java | 197 + .../expected.txt | 0 .../info.txt | 3 + .../src/Main.java | 51 + .../expected.txt | 2 + .../593-checker-shift-and-simplifier/info.txt | 1 + .../smali/SmaliTests.smali | 58 + .../src/Main.java | 66 + test/594-checker-array-alias/expected.txt | 1 + test/594-checker-array-alias/info.txt | 1 + test/594-checker-array-alias/src/Main.java | 255 + .../expected.txt | 0 .../594-checker-irreducible-linorder/info.txt | 2 + .../smali/IrreducibleLoop.smali | 123 + .../src/Main.java | 25 + test/594-invoke-super/expected.txt | 7 + test/594-invoke-super/info.txt | 1 + .../594-invoke-super/smali/invoke-super.smali | 31 + test/594-invoke-super/src/Main.java | 80 + test/594-load-string-regression/expected.txt | 1 + 
test/594-load-string-regression/info.txt | 2 + test/594-load-string-regression/src/Main.java | 77 + test/595-error-class/expected.txt | 1 + test/595-error-class/info.txt | 1 + test/595-error-class/smali/error.smali | 23 + test/595-error-class/smali/merge.smali | 31 + test/595-error-class/smali/super.smali | 22 + test/595-error-class/src/Main.java | 30 + test/595-profile-saving/expected.txt | 2 + test/595-profile-saving/info.txt | 1 + test/595-profile-saving/profile-saving.cc | 78 + test/595-profile-saving/run | 30 + test/595-profile-saving/src/Main.java | 110 + test/596-app-images/app_images.cc | 76 + test/596-app-images/expected.txt | 1 + test/596-app-images/info.txt | 1 + test/596-app-images/src/Main.java | 155 + test/596-checker-dead-phi/expected.txt | 1 + test/596-checker-dead-phi/info.txt | 2 + .../smali/IrreducibleLoop.smali | 74 + test/596-checker-dead-phi/src/Main.java | 32 + test/596-monitor-inflation/expected.txt | 6 + test/596-monitor-inflation/info.txt | 5 + .../monitor_inflation.cc | 35 + test/596-monitor-inflation/src-art/Main.java | 79 + test/597-deopt-busy-loop/expected.txt | 4 + test/597-deopt-busy-loop/info.txt | 1 + test/597-deopt-busy-loop/run | 18 + test/597-deopt-busy-loop/src/FloatLoop.java | 97 + test/597-deopt-busy-loop/src/Main.java | 39 + test/597-deopt-busy-loop/src/SimdLoop.java | 96 + test/597-deopt-busy-loop/src/SimpleLoop.java | 62 + test/597-deopt-invoke-stub/expected.txt | 2 + test/597-deopt-invoke-stub/info.txt | 1 + test/597-deopt-invoke-stub/run | 21 + test/597-deopt-invoke-stub/src/Main.java | 93 + test/597-deopt-new-string/deopt.cc | 61 + test/597-deopt-new-string/expected.txt | 2 + test/597-deopt-new-string/info.txt | 1 + test/597-deopt-new-string/run | 18 + test/597-deopt-new-string/src/Main.java | 84 + .../expected.txt | 0 .../info.txt | 2 + .../smali/IrreducibleLoop.smali | 52 + .../src/Main.java | 25 + .../599-checker-irreducible-loop/expected.txt | 1 + test/599-checker-irreducible-loop/info.txt | 2 + 
.../smali/IrreducibleLoop.smali | 56 + .../src/Main.java | 30 + test/600-verifier-fails/expected.txt | 6 + test/600-verifier-fails/info.txt | 23 + test/600-verifier-fails/smali/class.smali | 24 + test/600-verifier-fails/smali/construct.smali | 25 + test/600-verifier-fails/smali/iget.smali | 25 + test/600-verifier-fails/smali/invoke.smali | 25 + test/600-verifier-fails/smali/iput.smali | 25 + test/600-verifier-fails/smali/sput.smali | 23 + test/600-verifier-fails/src/Main.java | 44 + test/601-method-access/expected.txt | 1 + test/601-method-access/info.txt | 1 + .../SubClassUsingInaccessibleMethod.smali | 33 + test/601-method-access/src/Main.java | 38 + .../src/other/ProtectedClass.java | 24 + .../src/other/PublicClass.java | 21 + test/602-deoptimizeable/expected.txt | 2 + test/602-deoptimizeable/info.txt | 1 + test/602-deoptimizeable/src/Main.java | 189 + test/603-checker-instanceof/expected.txt | 0 test/603-checker-instanceof/info.txt | 2 + test/603-checker-instanceof/src/Main.java | 82 + test/604-hot-static-interface/expected.txt | 1 + test/604-hot-static-interface/info.txt | 2 + test/604-hot-static-interface/src/Main.java | 39 + test/605-new-string-from-bytes/expected.txt | 0 test/605-new-string-from-bytes/info.txt | 2 + test/605-new-string-from-bytes/src/Main.java | 45 + test/606-erroneous-class/expected.txt | 0 test/606-erroneous-class/info.txt | 3 + .../jasmin-multidex/ClassA.j | 30 + test/606-erroneous-class/smali/ClassB.smali | 18 + test/606-erroneous-class/smali/ErrClass.smali | 26 + test/606-erroneous-class/src/Main.java | 21 + test/607-daemon-stress/expected.txt | 0 test/607-daemon-stress/info.txt | 3 + test/607-daemon-stress/src/Main.java | 31 + test/608-checker-unresolved-lse/expected.txt | 0 test/608-checker-unresolved-lse/info.txt | 3 + test/608-checker-unresolved-lse/run | 18 + .../MissingSuperClass.java | 18 + test/608-checker-unresolved-lse/src/Main.java | 126 + .../609-checker-inline-interface/expected.txt | 0 
test/609-checker-inline-interface/info.txt | 2 + .../src/Main.java | 78 + .../609-checker-x86-bounds-check/expected.txt | 1 + test/609-checker-x86-bounds-check/info.txt | 1 + .../src/Main.java | 88 + test/610-arraycopy/expected.txt | 0 test/610-arraycopy/info.txt | 2 + test/610-arraycopy/src/Main.java | 44 + test/611-checker-simplify-if/expected.txt | 7 + test/611-checker-simplify-if/info.txt | 1 + test/611-checker-simplify-if/src/Main.java | 281 + test/612-jit-dex-cache/expected.txt | 1 + test/612-jit-dex-cache/info.txt | 2 + test/612-jit-dex-cache/src-art/A.java | 21 + test/612-jit-dex-cache/src-art/B.java | 18 + test/612-jit-dex-cache/src-art/Main.java | 67 + test/612-jit-dex-cache/src-ex/B.java | 18 + .../src-ex/LoadedByAppClassLoader.java | 36 + test/613-inlining-dex-cache/expected.txt | 1 + test/613-inlining-dex-cache/info.txt | 2 + test/613-inlining-dex-cache/run | 20 + test/613-inlining-dex-cache/src-art/B.java | 20 + test/613-inlining-dex-cache/src-art/Main.java | 85 + test/613-inlining-dex-cache/src-ex/B.java | 18 + .../src-ex/LoadedByAppClassLoader.java | 22 + .../expected.txt | 0 .../info.txt | 2 + .../src/Main.java | 42 + .../615-checker-arm64-store-zero/expected.txt | 0 test/615-checker-arm64-store-zero/info.txt | 1 + .../src/Main.java | 472 + test/616-cha-abstract/expected.txt | 1 + test/616-cha-abstract/info.txt | 1 + test/616-cha-abstract/run | 18 + test/616-cha-abstract/src/Main.java | 159 + test/616-cha-interface-default/build | 17 + test/616-cha-interface-default/expected.txt | 1 + test/616-cha-interface-default/info.txt | 2 + test/616-cha-interface-default/run | 18 + .../src-multidex/Base.java | 41 + test/616-cha-interface-default/src/Main.java | 176 + test/616-cha-interface/expected.txt | 1 + test/616-cha-interface/info.txt | 1 + test/616-cha-interface/run | 18 + test/616-cha-interface/src/Main.java | 173 + test/616-cha-miranda/expected.txt | 1 + test/616-cha-miranda/info.txt | 1 + test/616-cha-miranda/run | 18 + 
test/616-cha-miranda/src/Main.java | 163 + test/616-cha-native/expected.txt | 1 + test/616-cha-native/info.txt | 2 + test/616-cha-native/src/Main.java | 33 + test/616-cha-proxy-method-inline/expected.txt | 1 + test/616-cha-proxy-method-inline/info.txt | 1 + test/616-cha-proxy-method-inline/run | 18 + .../src-multidex/Foo.java | 19 + .../616-cha-proxy-method-inline/src/Main.java | 70 + .../expected.txt | 1 + test/616-cha-regression-proxy-method/info.txt | 1 + .../src/Main.java | 131 + test/616-cha-unloading/cha_unload.cc | 88 + test/616-cha-unloading/expected.txt | 2 + test/616-cha-unloading/info.txt | 1 + test/616-cha-unloading/run | 18 + .../src-art/AbstractCHATester.java | 19 + test/616-cha-unloading/src-art/Main.java | 121 + .../src-ex/AbstractCHATester.java | 19 + .../src-ex/ConcreteCHATester.java | 19 + test/616-cha/expected.txt | 1 + test/616-cha/info.txt | 1 + test/616-cha/run | 18 + test/616-cha/src/Main.java | 253 + test/617-clinit-oome/expected.txt | 1 + test/617-clinit-oome/info.txt | 1 + test/617-clinit-oome/src/Main.java | 43 + test/617-clinit-oome/src/Other.java | 28 + test/618-checker-induction/expected.txt | 1 + test/618-checker-induction/info.txt | 1 + test/618-checker-induction/src/Main.java | 948 ++ test/619-checker-current-method/expected.txt | 0 test/619-checker-current-method/info.txt | 2 + test/619-checker-current-method/src/Main.java | 33 + test/620-checker-bce-intrinsics/expected.txt | 1 + test/620-checker-bce-intrinsics/info.txt | 1 + test/620-checker-bce-intrinsics/src/Main.java | 285 + test/622-checker-bce-regressions/expected.txt | 1 + test/622-checker-bce-regressions/info.txt | 1 + .../622-checker-bce-regressions/src/Main.java | 73 + .../expected.txt | 0 test/622-simplifyifs-exception-edges/info.txt | 2 + .../smali/Test.smali | 76 + .../src/Main.java | 43 + .../623-checker-loop-regressions/expected.txt | 2 + test/623-checker-loop-regressions/info.txt | 1 + .../src/Main.java | 803 ++ test/624-checker-stringops/expected.txt | 1 + 
test/624-checker-stringops/info.txt | 1 + test/624-checker-stringops/smali/Smali.smali | 196 + test/624-checker-stringops/src/Main.java | 329 + .../625-checker-licm-regressions/expected.txt | 1 + test/625-checker-licm-regressions/info.txt | 1 + .../src/Main.java | 135 + .../expected.txt | 1 + .../info.txt | 2 + .../smali/Main2.smali | 1768 +++ .../src-art/Main.java | 23 + .../src/Main.java | 22 + .../clear_dex_cache_types.cc | 71 + test/626-const-class-linking/expected.txt | 61 + test/626-const-class-linking/info.txt | 3 + .../src-multidex/Helper2.java | 23 + .../src-multidex/Helper3.java | 23 + .../src-multidex/Test.java | 18 + .../src-multidex/Test3.java | 18 + .../src/ClassPair.java | 32 + .../src/DefiningLoader.java | 239 + .../src/DelegatingLoader.java | 45 + test/626-const-class-linking/src/Helper1.java | 23 + test/626-const-class-linking/src/Main.java | 356 + .../src/MisbehavingLoader.java | 47 + .../src/RacyLoader.java | 78 + .../src/RacyMisbehavingHelper.java | 33 + .../src/RacyMisbehavingLoader.java | 99 + test/626-set-resolved-string/expected.txt | 2 + test/626-set-resolved-string/info.txt | 3 + test/626-set-resolved-string/src/Main.java | 48 + test/627-checker-unroll/expected.txt | 1 + test/627-checker-unroll/info.txt | 1 + test/627-checker-unroll/src/Main.java | 119 + test/628-vdex/expected.txt | 2 + test/628-vdex/info.txt | 0 test/628-vdex/run | 17 + test/628-vdex/src/Main.java | 37 + test/629-vdex-speed/expected.txt | 1 + test/629-vdex-speed/info.txt | 2 + test/629-vdex-speed/run | 17 + test/629-vdex-speed/src/Main.java | 27 + test/630-safecast-array/expected.txt | 0 test/630-safecast-array/info.txt | 3 + test/630-safecast-array/smali/Main.smali | 33 + test/631-checker-fp-abs/expected.txt | 1 + test/631-checker-fp-abs/info.txt | 1 + test/631-checker-fp-abs/src/Main.java | 196 + test/631-checker-get-class/expected.txt | 0 test/631-checker-get-class/info.txt | 4 + test/631-checker-get-class/src/Main.java | 111 + 
test/632-checker-char-at-bounds/expected.txt | 0 test/632-checker-char-at-bounds/info.txt | 2 + test/632-checker-char-at-bounds/src/Main.java | 40 + test/633-checker-rtp-getclass/expected.txt | 3 + test/633-checker-rtp-getclass/info.txt | 3 + .../smali/SmaliTests.smali | 65 + test/633-checker-rtp-getclass/src/Main.java | 65 + test/634-vdex-duplicate/expected.txt | 1 + test/634-vdex-duplicate/info.txt | 0 test/634-vdex-duplicate/run | 17 + test/634-vdex-duplicate/src/Main.java | 21 + .../src/sun/misc/Unsafe.java | 20 + .../expected.txt | 1 + .../info.txt | 3 + .../src/Main.java | 284 + test/636-arm64-veneer-pool/expected.txt | 1 + test/636-arm64-veneer-pool/info.txt | 1 + test/636-arm64-veneer-pool/src/Main.java | 4223 +++++++ test/636-wrong-static-access/expected.txt | 1 + test/636-wrong-static-access/info.txt | 2 + test/636-wrong-static-access/run | 20 + test/636-wrong-static-access/src-ex/Foo.java | 38 + test/636-wrong-static-access/src/Holder.java | 19 + test/636-wrong-static-access/src/Main.java | 39 + test/636-wrong-static-access/src2/Holder.java | 19 + test/637-checker-throw-inline/expected.txt | 0 test/637-checker-throw-inline/info.txt | 1 + test/637-checker-throw-inline/src/Main.java | 64 + .../expected.txt | 1 + .../info.txt | 1 + test/638-checker-inline-cache-intrinsic/run | 22 + .../src/Main.java | 95 + test/638-checker-inline-caches/expected.txt | 0 test/638-checker-inline-caches/info.txt | 1 + test/638-checker-inline-caches/profile | 6 + test/638-checker-inline-caches/run | 17 + .../src-multidex/SubC.java | 19 + test/638-checker-inline-caches/src/Main.java | 196 + test/638-checker-inline-caches/src/Super.java | 19 + test/638-no-line-number/build | 22 + test/638-no-line-number/expected.txt | 5 + test/638-no-line-number/info.txt | 1 + test/638-no-line-number/src/Main.java | 34 + test/639-checker-code-sinking/expected.txt | 3 + test/639-checker-code-sinking/info.txt | 1 + test/639-checker-code-sinking/src/Main.java | 389 + 
test/640-checker-boolean-simd/expected.txt | 1 + test/640-checker-boolean-simd/info.txt | 1 + test/640-checker-boolean-simd/src/Main.java | 128 + test/640-checker-integer-valueof/expected.txt | 0 test/640-checker-integer-valueof/info.txt | 1 + .../640-checker-integer-valueof/src/Main.java | 93 + test/640-checker-simd/expected.txt | 7 + test/640-checker-simd/info.txt | 1 + test/640-checker-simd/src/Main.java | 27 + test/640-checker-simd/src/SimdByte.java | 272 + test/640-checker-simd/src/SimdChar.java | 265 + test/640-checker-simd/src/SimdDouble.java | 196 + test/640-checker-simd/src/SimdFloat.java | 195 + test/640-checker-simd/src/SimdInt.java | 307 + test/640-checker-simd/src/SimdLong.java | 307 + test/640-checker-simd/src/SimdShort.java | 264 + test/641-checker-arraycopy/expected.txt | 0 test/641-checker-arraycopy/info.txt | 2 + test/641-checker-arraycopy/src/Main.java | 83 + test/641-irreducible-inline/expected.txt | 1 + test/641-irreducible-inline/info.txt | 2 + .../smali/IrreducibleLoop.smali | 54 + test/641-irreducible-inline/src/Main.java | 29 + test/641-iterations/expected.txt | 1 + test/641-iterations/info.txt | 1 + test/641-iterations/src/Main.java | 73 + test/642-fp-callees/expected.txt | 2 + test/642-fp-callees/fp_callees.cc | 72 + test/642-fp-callees/info.txt | 2 + test/642-fp-callees/src/Main.java | 34 + test/643-checker-bogus-ic/expected.txt | 0 test/643-checker-bogus-ic/info.txt | 1 + test/643-checker-bogus-ic/profile | 2 + test/643-checker-bogus-ic/run | 17 + test/643-checker-bogus-ic/src/Main.java | 49 + test/645-checker-abs-simd/expected.txt | 1 + test/645-checker-abs-simd/info.txt | 1 + test/645-checker-abs-simd/src/Main.java | 407 + .../expected.txt | 1 + .../info.txt | 4 + .../src/Main.java | 37 + .../expected.txt | 1 + test/646-checker-long-const-to-int/info.txt | 1 + .../src/Main.java | 56 + test/646-checker-simd-hadd/expected.txt | 6 + test/646-checker-simd-hadd/info.txt | 1 + .../src/HaddAltByte.java | 266 + .../src/HaddAltChar.java | 277 
+ .../src/HaddAltShort.java | 271 + test/646-checker-simd-hadd/src/HaddByte.java | 261 + test/646-checker-simd-hadd/src/HaddChar.java | 308 + test/646-checker-simd-hadd/src/HaddShort.java | 442 + test/646-checker-simd-hadd/src/Main.java | 26 + test/647-jni-get-field-id/expected.txt | 28 + test/647-jni-get-field-id/get_field_id.cc | 43 + test/647-jni-get-field-id/info.txt | 1 + .../src/DefiningLoader.java | 239 + test/647-jni-get-field-id/src/Main.java | 107 + test/647-sinking-catch/expected.txt | 1 + test/647-sinking-catch/info.txt | 2 + test/647-sinking-catch/smali/TestCase.smali | 35 + test/647-sinking-catch/src/Main.java | 38 + .../648-inline-caches-unresolved/expected.txt | 1 + test/648-inline-caches-unresolved/info.txt | 1 + test/648-inline-caches-unresolved/profile | 1 + test/648-inline-caches-unresolved/run | 17 + .../UnresolvedSuperClass.java | 21 + .../src/Main.java | 31 + test/648-many-direct-methods/build | 25 + test/648-many-direct-methods/expected.txt | 1 + test/648-many-direct-methods/info.txt | 2 + .../util-src/generate_java.py | 137 + test/649-vdex-duplicate-method/classes.dex | Bin 0 -> 900 bytes test/649-vdex-duplicate-method/expected.txt | 1 + test/649-vdex-duplicate-method/info.txt | 1 + .../expected.txt | 1 + .../650-checker-inline-access-thunks/info.txt | 1 + .../src/Main.java | 60 + test/652-deopt-intrinsic/expected.txt | 1 + test/652-deopt-intrinsic/info.txt | 2 + test/652-deopt-intrinsic/run | 22 + test/652-deopt-intrinsic/src/Main.java | 48 + test/654-checker-periodic/expected.txt | 1 + test/654-checker-periodic/info.txt | 1 + test/654-checker-periodic/src/Main.java | 173 + test/655-checker-simd-arm-opt/expected.txt | 1 + test/655-checker-simd-arm-opt/info.txt | 1 + test/655-checker-simd-arm-opt/src/Main.java | 99 + test/655-jit-clinit/expected.txt | 1 + test/655-jit-clinit/info.txt | 3 + test/655-jit-clinit/src/Main.java | 57 + test/656-annotation-lookup-generic-jni/check | 21 + .../expected.txt | 3 + .../info.txt | 7 + 
.../src-art/Main.java | 76 + .../src-ex/DummyAnnotation.java | 17 + .../src-ex/Test.java | 28 + .../656-annotation-lookup-generic-jni/test.cc | 28 + test/656-checker-simd-opt/expected.txt | 1 + test/656-checker-simd-opt/info.txt | 1 + test/656-checker-simd-opt/smali/Smali.smali | 121 + test/656-checker-simd-opt/src/Main.java | 279 + test/656-loop-deopt/expected.txt | 1 + test/656-loop-deopt/info.txt | 2 + test/656-loop-deopt/src/Main.java | 135 + test/657-branches/expected.txt | 1 + test/657-branches/info.txt | 2 + test/657-branches/src/Main.java | 47 + test/658-fp-read-barrier/expected.txt | 0 test/658-fp-read-barrier/info.txt | 2 + test/658-fp-read-barrier/src/Main.java | 138 + test/659-unpadded-array/expected.txt | 0 test/659-unpadded-array/info.txt | 3 + test/659-unpadded-array/src-art/Main.java | 52 + test/660-checker-sad/expected.txt | 5 + test/660-checker-sad/info.txt | 1 + test/660-checker-sad/src/Main.java | 25 + test/660-checker-sad/src/SadByte.java | 151 + test/660-checker-sad/src/SadChar.java | 165 + test/660-checker-sad/src/SadInt.java | 167 + test/660-checker-sad/src/SadLong.java | 111 + test/660-checker-sad/src/SadShort.java | 165 + test/660-checker-simd-sad/expected.txt | 7 + test/660-checker-simd-sad/info.txt | 1 + test/660-checker-simd-sad/src/Main.java | 27 + .../660-checker-simd-sad/src/SimdSadByte.java | 332 + .../660-checker-simd-sad/src/SimdSadChar.java | 259 + test/660-checker-simd-sad/src/SimdSadInt.java | 244 + .../660-checker-simd-sad/src/SimdSadLong.java | 209 + .../src/SimdSadShort.java | 405 + .../src/SimdSadShort2.java | 389 + .../src/SimdSadShort3.java | 354 + test/660-clinit/expected.txt | 14 + test/660-clinit/info.txt | 1 + test/660-clinit/profile | 13 + test/660-clinit/run | 17 + test/660-clinit/src/Main.java | 236 + test/660-store-8-16/expected.txt | 0 test/660-store-8-16/info.txt | 3 + test/660-store-8-16/smali/TestCase.smali | 102 + test/660-store-8-16/src/Main.java | 64 + test/661-checker-simd-reduc/expected.txt | 1 + 
test/661-checker-simd-reduc/info.txt | 1 + test/661-checker-simd-reduc/src/Main.java | 477 + test/661-classloader-allocator/expected.txt | 1 + test/661-classloader-allocator/info.txt | 3 + .../src-ex/OtherClass.java | 28 + test/661-classloader-allocator/src/Main.java | 62 + .../expected.no-compiled-code.txt | 1 + test/661-oat-writer-layout/expected.txt | 64 + test/661-oat-writer-layout/info.txt | 4 + .../oat_writer_layout.cc | 82 + .../parse_oatdump_offsets.sh | 38 + test/661-oat-writer-layout/profile | 54 + test/661-oat-writer-layout/run | 22 + test/661-oat-writer-layout/src/Generated.java | 109 + test/661-oat-writer-layout/src/Main.java | 80 + test/661-oat-writer-layout/src/Test.java | 90 + test/662-regression-alias/expected.txt | 1 + test/662-regression-alias/info.txt | 1 + test/662-regression-alias/src/Main.java | 80 + .../663-checker-select-generator/expected.txt | 0 test/663-checker-select-generator/info.txt | 14 + .../smali/TestCase.smali | 72 + .../src/Main.java | 62 + test/663-odd-dex-size/classes.dex | Bin 0 -> 733 bytes test/663-odd-dex-size/expected.txt | 1 + test/663-odd-dex-size/info.txt | 14 + test/663-odd-dex-size2/663-odd-dex-size2.jar | Bin 0 -> 1136 bytes test/663-odd-dex-size2/build | 17 + test/663-odd-dex-size2/expected.txt | 1 + test/663-odd-dex-size2/info.txt | 15 + test/663-odd-dex-size3/663-odd-dex-size3.jar | Bin 0 -> 962 bytes test/663-odd-dex-size3/build | 17 + test/663-odd-dex-size3/expected.txt | 1 + test/663-odd-dex-size3/info.txt | 19 + test/663-odd-dex-size4/663-odd-dex-size4.jar | Bin 0 -> 962 bytes test/663-odd-dex-size4/build | 17 + test/663-odd-dex-size4/expected.txt | 1 + test/663-odd-dex-size4/info.txt | 19 + test/664-aget-verifier/aget-verifier.cc | 41 + test/664-aget-verifier/expected.txt | 2 + test/664-aget-verifier/info.txt | 6 + test/664-aget-verifier/src/Main.java | 49 + test/665-checker-simd-zero/expected.txt | 1 + test/665-checker-simd-zero/info.txt | 1 + test/665-checker-simd-zero/src/Main.java | 236 + 
test/666-dex-cache-itf/expected.txt | 0 test/666-dex-cache-itf/info.txt | 2 + test/666-dex-cache-itf/src/Main.java | 117 + test/667-checker-simd-alignment/expected.txt | 1 + test/667-checker-simd-alignment/info.txt | 1 + test/667-checker-simd-alignment/src/Main.java | 337 + test/667-jit-jni-stub/expected.txt | 1 + test/667-jit-jni-stub/info.txt | 1 + test/667-jit-jni-stub/jit_jni_stub_test.cc | 63 + test/667-jit-jni-stub/run | 19 + test/667-jit-jni-stub/src/Main.java | 183 + test/667-out-of-bounds/expected.txt | 1 + test/667-out-of-bounds/info.txt | 3 + test/667-out-of-bounds/src/Main.java | 30 + test/668-aiobe/expected.txt | 0 test/668-aiobe/info.txt | 2 + test/668-aiobe/smali/TestCase.smali | 30 + test/668-aiobe/src/Main.java | 36 + test/669-checker-break/expected.txt | 1 + test/669-checker-break/info.txt | 1 + test/669-checker-break/src/Main.java | 328 + test/670-bitstring-type-check/build | 216 + test/670-bitstring-type-check/expected.txt | 0 test/670-bitstring-type-check/info.txt | 1 + test/670-bitstring-type-check/run | 20 + test/671-npe-field-opts/expected.txt | 0 test/671-npe-field-opts/info.txt | 3 + test/671-npe-field-opts/src/Main.java | 86 + test/672-checker-throw-method/expected.txt | 1 + test/672-checker-throw-method/info.txt | 1 + test/672-checker-throw-method/src/Main.java | 316 + test/673-checker-throw-vmethod/expected.txt | 1 + test/673-checker-throw-vmethod/info.txt | 1 + test/673-checker-throw-vmethod/src/Main.java | 219 + test/674-HelloWorld-Dm/expected.txt | 1 + test/674-HelloWorld-Dm/info.txt | 1 + test/674-HelloWorld-Dm/run | 17 + test/674-HelloWorld-Dm/src/Main.java | 21 + test/674-hiddenapi/build | 38 + test/674-hiddenapi/check | 23 + test/674-hiddenapi/expected.txt | 0 test/674-hiddenapi/hiddenapi-flags.csv | 108 + test/674-hiddenapi/hiddenapi.cc | 336 + test/674-hiddenapi/info.txt | 16 + test/674-hiddenapi/run | 19 + test/674-hiddenapi/src-art/Main.java | 185 + test/674-hiddenapi/src-ex/ChildClass.java | 558 + 
test/674-hiddenapi/src-ex/JLI.java | 98 + test/674-hiddenapi/src-ex/JNI.java | 29 + test/674-hiddenapi/src-ex/Linking.java | 303 + test/674-hiddenapi/src-ex/Reflection.java | 244 + test/674-hiddenapi/src/DummyClass.java | 27 + .../src/NullaryConstructorBlacklist.java | 21 + ...onstructorBlacklistAndCorePlatformApi.java | 21 + .../src/NullaryConstructorDarkGreylist.java | 21 + .../src/NullaryConstructorLightGreylist.java | 21 + .../src/NullaryConstructorWhitelist.java | 21 + test/674-hiddenapi/src/ParentClass.java | 199 + test/674-hiddenapi/src/ParentInterface.java | 45 + test/674-hotness-compiled/expected.txt | 1 + test/674-hotness-compiled/info.txt | 1 + test/674-hotness-compiled/run | 17 + test/674-hotness-compiled/src/Main.java | 54 + test/674-vdex-uncompress/build | 19 + test/674-vdex-uncompress/expected.txt | 2 + test/674-vdex-uncompress/info.txt | 2 + test/674-vdex-uncompress/run | 17 + test/674-vdex-uncompress/src/Main.java | 37 + .../expected.txt | 1 + test/675-checker-unverified-method/info.txt | 1 + .../smali/TestCase.smali | 55 + .../src/Main.java | 28 + test/676-proxy-jit-at-first-use/expected.txt | 1 + test/676-proxy-jit-at-first-use/info.txt | 1 + test/676-proxy-jit-at-first-use/run | 19 + test/676-proxy-jit-at-first-use/src/Main.java | 39 + test/676-resolve-field-type/expected.txt | 1 + test/676-resolve-field-type/info.txt | 2 + test/676-resolve-field-type/src-art/Foo.java | 19 + test/676-resolve-field-type/src-art/Main.java | 31 + .../src-ex/ChildClass.java | 72 + test/677-fsi/build | 17 + test/677-fsi/check | 23 + test/677-fsi/expected.txt | 2 + test/677-fsi/info.txt | 0 test/677-fsi/run | 19 + test/677-fsi/src/Main.java | 21 + test/677-fsi2/expected.txt | 1 + test/677-fsi2/info.txt | 1 + test/677-fsi2/run | 17 + test/677-fsi2/src/Main.java | 21 + test/678-quickening/expected.txt | 1 + test/678-quickening/info.txt | 1 + test/678-quickening/run | 18 + test/678-quickening/src-art/Main.java | 79 + test/679-checker-minmax/expected.txt | 1 + 
test/679-checker-minmax/info.txt | 1 + test/679-checker-minmax/src/Main.java | 801 ++ test/679-locks/expected.txt | 2 + test/679-locks/info.txt | 2 + test/679-locks/run | 18 + test/679-locks/src/Main.java | 50 + test/680-checker-deopt-dex-pc-0/expected.txt | 2 + test/680-checker-deopt-dex-pc-0/info.txt | 2 + test/680-checker-deopt-dex-pc-0/src/Main.java | 57 + test/680-sink-regression/expected.txt | 1 + test/680-sink-regression/info.txt | 1 + test/680-sink-regression/src/Main.java | 87 + test/681-checker-abs/expected.txt | 1 + test/681-checker-abs/info.txt | 1 + test/681-checker-abs/src/Main.java | 328 + test/682-double-catch-phi/expected.txt | 1 + test/682-double-catch-phi/info.txt | 1 + .../smali/DoubleCatchPhi.smali | 47 + test/682-double-catch-phi/src/Main.java | 26 + .../expected.txt | 0 test/683-clinit-inline-static-invoke/info.txt | 3 + .../src-multidex/MyTimeZone.java | 22 + .../src/Main.java | 31 + test/684-checker-simd-dotprod/expected.txt | 1 + test/684-checker-simd-dotprod/info.txt | 1 + test/684-checker-simd-dotprod/src/Main.java | 33 + .../src/other/TestByte.java | 484 + .../src/other/TestCharShort.java | 552 + .../src/other/TestFloatDouble.java | 93 + .../src/other/TestVarious.java | 422 + test/684-select-condition/expected.txt | 0 test/684-select-condition/info.txt | 1 + test/684-select-condition/src/Main.java | 83 + test/685-deoptimizeable/expected.txt | 2 + test/685-deoptimizeable/info.txt | 1 + test/685-deoptimizeable/src/Main.java | 252 + test/685-shifts/expected.txt | 0 test/685-shifts/info.txt | 1 + test/685-shifts/smali/Test.smali | 58 + test/685-shifts/src/Main.java | 96 + test/686-get-this/expected.txt | 1 + test/686-get-this/info.txt | 2 + test/686-get-this/smali/Test.smali | 45 + test/686-get-this/src/Main.java | 47 + test/687-deopt/expected.txt | 1 + test/687-deopt/info.txt | 2 + test/687-deopt/src/Main.java | 53 + test/688-shared-library/check | 22 + test/688-shared-library/expected.txt | 0 test/688-shared-library/info.txt | 2 + 
test/688-shared-library/run | 19 + test/688-shared-library/src-art/Main.java | 194 + test/688-shared-library/src-ex/Main.java | 18 + .../src-ex/SharedLibraryOne.java | 18 + test/689-multi-catch/expected.txt | 0 test/689-multi-catch/info.txt | 2 + test/689-multi-catch/src/Main.java | 32 + test/689-zygote-jit-deopt/expected.txt | 1 + test/689-zygote-jit-deopt/info.txt | 2 + test/689-zygote-jit-deopt/run | 17 + test/689-zygote-jit-deopt/src/Main.java | 40 + test/690-hiddenapi-same-name-methods/build | 17 + .../expected.txt | 1 + .../hiddenapi-flags.csv | 9 + test/690-hiddenapi-same-name-methods/info.txt | 1 + .../smali-ex/DirectMethods.smali | 46 + .../smali-ex/NonSyntheticMethods.smali | 46 + .../smali-ex/SyntheticMethods.smali | 46 + .../smali-ex/VirtualMethods.smali | 46 + .../src-ex/GenericInterface.java | 19 + .../src-ex/SpecificClass.java | 21 + .../src/Main.java | 106 + test/691-hiddenapi-proxy/build | 17 + test/691-hiddenapi-proxy/expected.txt | 6 + test/691-hiddenapi-proxy/hiddenapi-flags.csv | 1 + test/691-hiddenapi-proxy/info.txt | 3 + .../src-ex/MyInterface.java | 21 + test/691-hiddenapi-proxy/src/Main.java | 56 + test/692-vdex-inmem-loader/expected.txt | 8 + test/692-vdex-inmem-loader/info.txt | 3 + .../src-ex/DummyClass.java | 18 + .../src-secondary/art/ClassA.java | 23 + .../src-secondary/art/ClassB.java | 23 + .../src-secondary/gen.sh | 37 + test/692-vdex-inmem-loader/src/Main.java | 168 + .../vdex_inmem_loader.cc | 191 + test/693-vdex-inmem-loader-evict/expected.txt | 1 + test/693-vdex-inmem-loader-evict/info.txt | 2 + .../src-secondary/gen.sh | 54 + .../693-vdex-inmem-loader-evict/src/Main.java | 192 + test/694-clinit-jit/expected.txt | 1 + test/694-clinit-jit/info.txt | 3 + test/694-clinit-jit/src/Main.java | 89 + test/695-simplify-throws/expected.txt | 0 test/695-simplify-throws/info.txt | 3 + test/695-simplify-throws/src/Main.java | 45 + test/696-loop/expected.txt | 0 test/696-loop/info.txt | 2 + test/696-loop/src/Main.java | 38 + 
test/697-checker-string-append/expected.txt | 1 + test/697-checker-string-append/info.txt | 1 + test/697-checker-string-append/src/Main.java | 299 + test/698-selects/expected.txt | 0 test/698-selects/info.txt | 2 + test/698-selects/src/Main.java | 42 + test/699-checker-string-append2/expected.txt | 1 + test/699-checker-string-append2/info.txt | 1 + .../smali/B146014745.smali | 163 + test/699-checker-string-append2/src/Main.java | 40 + test/700-LoadArgRegs/expected.txt | 77 + test/700-LoadArgRegs/info.txt | 1 + test/700-LoadArgRegs/src/Main.java | 303 + test/701-easy-div-rem/build | 24 + test/701-easy-div-rem/expected.txt | 8 + test/701-easy-div-rem/genMain.py | 170 + test/701-easy-div-rem/info.txt | 1 + test/702-LargeBranchOffset/build | 23 + test/702-LargeBranchOffset/expected.txt | 5 + test/702-LargeBranchOffset/info.txt | 1 + test/702-LargeBranchOffset/src/Main.java.in | 47 + test/703-floating-point-div/expected.txt | 1 + test/703-floating-point-div/info.txt | 1 + test/703-floating-point-div/src/Main.java | 96 + test/704-multiply-accumulate/expected.txt | 1 + test/704-multiply-accumulate/info.txt | 1 + test/704-multiply-accumulate/src/Main.java | 171 + test/705-register-conflict/expected.txt | 1 + test/705-register-conflict/info.txt | 1 + test/705-register-conflict/src/Main.java | 73 + test/706-checker-scheduler/expected.txt | 0 test/706-checker-scheduler/info.txt | 1 + test/706-checker-scheduler/run | 18 + .../UnresolvedClass.java | 21 + test/706-checker-scheduler/src/Main.java | 711 ++ test/707-checker-invalid-profile/check | 21 + test/707-checker-invalid-profile/expected.txt | 1 + test/707-checker-invalid-profile/info.txt | 2 + test/707-checker-invalid-profile/profile | 4 + test/707-checker-invalid-profile/run | 17 + .../707-checker-invalid-profile/src/Main.java | 43 + test/708-jit-cache-churn/expected.txt | 2 + test/708-jit-cache-churn/info.txt | 1 + test/708-jit-cache-churn/jit.cc | 56 + .../src/JitCacheChurnTest.java | 279 + 
test/708-jit-cache-churn/src/Main.java | 31 + test/709-checker-varhandles/build | 20 + test/709-checker-varhandles/expected.txt | 2 + test/709-checker-varhandles/info.txt | 1 + test/709-checker-varhandles/src/Main.java | 104 + test/710-varhandle-creation/build | 20 + test/710-varhandle-creation/expected.txt | 6 + test/710-varhandle-creation/info.txt | 2 + test/710-varhandle-creation/src/Main.java | 2413 ++++ test/711-checker-type-conversion/expected.txt | 0 test/711-checker-type-conversion/info.txt | 1 + .../711-checker-type-conversion/src/Main.java | 264 + test/712-varhandle-invocations/build | 35 + test/712-varhandle-invocations/expected.txt | 3183 ++++++ test/712-varhandle-invocations/info.txt | 1 + .../src/SampleValues.java | 130 + .../src/SimpleTests.java | 61 + .../src/VarHandleAccessorExceptionTests.java | 232 + .../src/VarHandleBadCoordinateTests.java | 948 ++ .../src/VarHandleReflectiveTest.java | 58 + .../src/VarHandleTypeConversionTests.java | 1343 +++ .../src/VarHandleUnitTest.java | 190 + .../src/VarHandleUnitTestCollector.java | 83 + .../src/VarHandleUnitTestHelpers.java | 281 + .../712-varhandle-invocations/src/Widget.java | 39 + .../util-src/generate_java.py | 880 ++ test/713-varhandle-invokers/build | 20 + test/713-varhandle-invokers/expected.txt | 4 + test/713-varhandle-invokers/info.txt | 1 + test/713-varhandle-invokers/src/Main.java | 440 + .../build | 22 + .../expected.txt | 5 + .../info.txt | 1 + test/714-invoke-custom-lambda-metafactory/run | 19 + .../src/Main.java | 32 + .../build | 20 + .../expected.txt | 113 + .../info.txt | 5 + .../src/Main.java | 215 + test/716-jli-jit-samples/build | 20 + test/716-jli-jit-samples/expected.txt | 3 + test/716-jli-jit-samples/info.txt | 2 + test/716-jli-jit-samples/src-art/Main.java | 142 + test/717-integer-value-of/expected.txt | 1 + test/717-integer-value-of/info.txt | 2 + test/717-integer-value-of/src/Main.java | 134 + test/718-zipfile-finalizer/expected.txt | 0 test/718-zipfile-finalizer/info.txt | 2 + 
test/718-zipfile-finalizer/src/Main.java | 40 + test/719-dm-verify-redefinition/check | 22 + test/719-dm-verify-redefinition/expected.txt | 2 + test/719-dm-verify-redefinition/info.txt | 2 + test/719-dm-verify-redefinition/run | 26 + .../src-ex/Redefined.java | 18 + test/719-dm-verify-redefinition/src/Main.java | 21 + .../src/Redefined.java | 18 + test/720-thread-priority/expected.txt | 1 + test/720-thread-priority/info.txt | 2 + test/720-thread-priority/src/Main.java | 65 + test/720-thread-priority/thread_priority.cc | 28 + test/721-osr/expected.txt | 0 test/721-osr/info.txt | 3 + test/721-osr/src/Main.java | 45 + test/723-string-init-range/expected.txt | 0 test/723-string-init-range/info.txt | 1 + .../smali/new-instance.smali | 25 + test/723-string-init-range/src/Main.java | 30 + test/724-invoke-super-npe/expected.txt | 0 test/724-invoke-super-npe/info.txt | 1 + .../724-invoke-super-npe/smali/TestCase.smali | 42 + test/724-invoke-super-npe/src/Main.java | 42 + test/725-imt-conflict-object/expected.txt | 0 test/725-imt-conflict-object/info.txt | 2 + .../smali/TestCase.smali | 25 + test/725-imt-conflict-object/src/Main.java | 246 + test/800-smali/expected.txt | 83 + test/800-smali/info.txt | 4 + test/800-smali/jni.cc | 41 + test/800-smali/smali/B30458218.smali | 27 + .../smali/BadCaseInOpRegRegReg.smali | 13 + test/800-smali/smali/CmpLong.smali | 18 + test/800-smali/smali/ConstClassAliasing.smali | 12 + test/800-smali/smali/EmptySparseSwitch.smali | 17 + test/800-smali/smali/FloatBadArgReg.smali | 16 + .../smali/FloatIntConstPassing.smali | 29 + test/800-smali/smali/PackedSwitch.smali | 52 + test/800-smali/smali/b_121191566.smali | 26 + test/800-smali/smali/b_121245951.smali | 26 + test/800-smali/smali/b_121245951_2.smali | 30 + test/800-smali/smali/b_121245951_3.smali | 33 + test/800-smali/smali/b_122501785.smali | 14 + test/800-smali/smali/b_134061982.smali | 60 + test/800-smali/smali/b_134061983_2.smali | 61 + test/800-smali/smali/b_17410612.smali | 14 + 
test/800-smali/smali/b_17790197.smali | 17 + .../smali/b_18380491AbstractBase.smali | 12 + .../smali/b_18380491ConcreteClass.smali | 19 + test/800-smali/smali/b_18718277.smali | 29 + test/800-smali/smali/b_18800943_1.smali | 9 + test/800-smali/smali/b_18800943_2.smali | 9 + test/800-smali/smali/b_20224106.smali | 16 + test/800-smali/smali/b_20843113.smali | 34 + test/800-smali/smali/b_21614284.smali | 22 + test/800-smali/smali/b_21645819.smali | 9 + test/800-smali/smali/b_21863767.smali | 29 + test/800-smali/smali/b_21869691A.smali | 47 + test/800-smali/smali/b_21869691B.smali | 33 + test/800-smali/smali/b_21869691C.smali | 12 + test/800-smali/smali/b_21869691I.smali | 11 + test/800-smali/smali/b_21873167.smali | 18 + test/800-smali/smali/b_21886894.smali | 15 + test/800-smali/smali/b_21902684.smali | 17 + test/800-smali/smali/b_22045582.smali | 13 + test/800-smali/smali/b_22045582_int.smali | 11 + test/800-smali/smali/b_22045582_wide.smali | 11 + test/800-smali/smali/b_22080519.smali | 27 + test/800-smali/smali/b_22244733.smali | 7 + test/800-smali/smali/b_22331663.smali | 39 + test/800-smali/smali/b_22331663_fail.smali | 20 + test/800-smali/smali/b_22331663_pass.smali | 22 + test/800-smali/smali/b_22411633_1.smali | 35 + test/800-smali/smali/b_22411633_2.smali | 45 + test/800-smali/smali/b_22411633_3.smali | 31 + test/800-smali/smali/b_22411633_4.smali | 25 + test/800-smali/smali/b_22411633_5.smali | 28 + test/800-smali/smali/b_22777307.smali | 18 + test/800-smali/smali/b_22881413.smali | 295 + test/800-smali/smali/b_23201502.smali | 23 + test/800-smali/smali/b_23300986.smali | 23 + test/800-smali/smali/b_23502994.smali | 45 + test/800-smali/smali/b_24399945.smali | 32 + test/800-smali/smali/b_25494456.smali | 14 + test/800-smali/smali/b_26143249.smali | 20 + test/800-smali/smali/b_26579108.smali | 34 + test/800-smali/smali/b_26594149_1.smali | 26 + test/800-smali/smali/b_26594149_2.smali | 26 + test/800-smali/smali/b_26594149_3.smali | 28 + 
test/800-smali/smali/b_26594149_4.smali | 38 + test/800-smali/smali/b_26594149_5.smali | 28 + test/800-smali/smali/b_26594149_6.smali | 24 + test/800-smali/smali/b_26594149_7.smali | 30 + test/800-smali/smali/b_26594149_8.smali | 24 + test/800-smali/smali/b_26965384.smali | 20 + test/800-smali/smali/b_26965384Super.smali | 10 + test/800-smali/smali/b_27148248.smali | 27 + test/800-smali/smali/b_27799205_1.smali | 37 + test/800-smali/smali/b_27799205_2.smali | 37 + test/800-smali/smali/b_27799205_3.smali | 39 + test/800-smali/smali/b_27799205_4.smali | 39 + test/800-smali/smali/b_27799205_5.smali | 39 + test/800-smali/smali/b_27799205_6.smali | 24 + test/800-smali/smali/b_27799205_helper.smali | 47 + test/800-smali/smali/b_28187158.smali | 11 + test/800-smali/smali/b_29778499_1.smali | 19 + test/800-smali/smali/b_29778499_2.smali | 13 + test/800-smali/smali/b_31313170.smali | 22 + test/800-smali/smali/move_exc.smali | 29 + .../smali/move_exception_on_entry.smali | 30 + test/800-smali/smali/negLong.smali | 186 + test/800-smali/smali/sameFieldNames.smali | 64 + test/800-smali/src/Main.java | 303 + test/800-smali/src/pkg/ProtectedClass.java | 20 + test/801-VoidCheckCast/classes.dex | Bin 0 -> 660 bytes test/801-VoidCheckCast/expected.txt | 0 test/801-VoidCheckCast/info.txt | 4 + test/802-deoptimization/expected.txt | 1 + test/802-deoptimization/info.txt | 1 + .../smali/catch_handler_on_entry.smali | 29 + .../src/CatchHandlerOnEntryHelper.java | 30 + .../src/DeoptimizationController.java | 101 + test/802-deoptimization/src/Main.java | 43 + test/803-no-super/expected.txt | 2 + test/803-no-super/info.txt | 3 + test/803-no-super/smali/nosuper1.smali | 3 + test/803-no-super/src/Main.java | 29 + test/804-class-extends-itself/build | 20 + test/804-class-extends-itself/expected.txt | 2 + test/804-class-extends-itself/info.txt | 1 + .../804-class-extends-itself/smali/Main.smali | 57 + .../smali/b_28685551.smali | 18 + test/805-TooDeepClassInstanceOf/expected.txt | 1 + 
test/805-TooDeepClassInstanceOf/info.txt | 1 + test/805-TooDeepClassInstanceOf/src/Main.java | 80 + test/806-TooWideClassInstanceOf/expected.txt | 1 + test/806-TooWideClassInstanceOf/info.txt | 2 + test/806-TooWideClassInstanceOf/src/Main.java | 1280 +++ test/807-method-handle-and-mr/build | 20 + test/807-method-handle-and-mr/expected.txt | 4 + test/807-method-handle-and-mr/info.txt | 2 + test/807-method-handle-and-mr/src/Main.java | 91 + test/900-hello-plugin/expected.txt | 10 + test/900-hello-plugin/info.txt | 2 + test/900-hello-plugin/load_unload.cc | 74 + test/900-hello-plugin/run | 45 + test/900-hello-plugin/src/Main.java | 21 + test/901-hello-ti-agent/basics.cc | 208 + test/901-hello-ti-agent/basics.h | 30 + test/901-hello-ti-agent/expected.txt | 76 + test/901-hello-ti-agent/info.txt | 1 + test/901-hello-ti-agent/run | 17 + test/901-hello-ti-agent/src/Main.java | 21 + test/901-hello-ti-agent/src/art/Main.java | 1 + test/901-hello-ti-agent/src/art/Test901.java | 84 + test/902-hello-transformation/expected.txt | 2 + test/902-hello-transformation/info.txt | 1 + test/902-hello-transformation/run | 17 + test/902-hello-transformation/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test902.java | 82 + test/903-hello-tagging/expected.txt | 11 + test/903-hello-tagging/info.txt | 1 + test/903-hello-tagging/run | 17 + test/903-hello-tagging/src/Main.java | 21 + test/903-hello-tagging/src/art/Main.java | 1 + test/903-hello-tagging/src/art/Test903.java | 177 + test/903-hello-tagging/tagging.cc | 165 + test/904-object-allocation/expected.txt | 8 + test/904-object-allocation/info.txt | 1 + test/904-object-allocation/run | 17 + test/904-object-allocation/src/Main.java | 21 + .../src/art/Test904.java | 159 + test/904-object-allocation/tracking.cc | 162 + test/905-object-free/expected.txt | 13 + test/905-object-free/info.txt | 1 + test/905-object-free/run | 17 + test/905-object-free/src/Main.java | 21 + test/905-object-free/src/art/Main.java | 1 + 
test/905-object-free/src/art/Test905.java | 199 + test/905-object-free/tracking_free.cc | 122 + test/906-iterate-heap/expected.txt | 45 + test/906-iterate-heap/info.txt | 1 + test/906-iterate-heap/iterate_heap.cc | 438 + test/906-iterate-heap/run | 17 + test/906-iterate-heap/src/Main.java | 21 + test/906-iterate-heap/src/art/Main.java | 1 + test/906-iterate-heap/src/art/Test906.java | 369 + test/907-get-loaded-classes/expected.txt | 0 .../get_loaded_classes.cc | 70 + test/907-get-loaded-classes/info.txt | 1 + test/907-get-loaded-classes/run | 17 + test/907-get-loaded-classes/src/Main.java | 21 + test/907-get-loaded-classes/src/art/Cerr.java | 27 + .../src/art/Test907.java | 83 + test/908-gc-start-finish/expected.txt | 12 + test/908-gc-start-finish/gc_callbacks.cc | 88 + test/908-gc-start-finish/info.txt | 1 + test/908-gc-start-finish/run | 17 + test/908-gc-start-finish/src/Main.java | 21 + test/908-gc-start-finish/src/art/Test908.java | 79 + test/909-attach-agent/attach.cc | 98 + test/909-attach-agent/attach.h | 30 + test/909-attach-agent/disallow_debugging.cc | 27 + test/909-attach-agent/expected.txt | 28 + test/909-attach-agent/info.txt | 1 + .../interpreter-expected.patch | 4 + test/909-attach-agent/run | 88 + test/909-attach-agent/src-art/Main.java | 120 + test/910-methods/expected.txt | 59 + test/910-methods/info.txt | 1 + test/910-methods/methods.cc | 196 + test/910-methods/run | 17 + test/910-methods/src/Main.java | 21 + test/910-methods/src/art/Test910.java | 194 + .../expected-cts-version.txt | 485 + test/911-get-stack-trace/expected.txt | 919 ++ test/911-get-stack-trace/info.txt | 1 + test/911-get-stack-trace/run | 17 + test/911-get-stack-trace/src/Main.java | 21 + .../src/art/AllTraces.java | 82 + .../src/art/ControlData.java | 33 + test/911-get-stack-trace/src/art/Frames.java | 135 + .../src/art/OtherThread.java | 85 + .../src/art/PrintThread.java | 92 + test/911-get-stack-trace/src/art/Recurse.java | 60 + .../src/art/SameThread.java | 36 + 
test/911-get-stack-trace/src/art/Test911.java | 64 + .../src/art/ThreadListTraces.java | 73 + test/911-get-stack-trace/stack_trace.cc | 258 + test/912-classes/classes.cc | 626 ++ test/912-classes/classes_art.cc | 146 + test/912-classes/expected.txt | 97 + test/912-classes/info.txt | 1 + test/912-classes/run | 22 + test/912-classes/src-art/Main.java | 22 + test/912-classes/src-art/art/DexData.java | 100 + test/912-classes/src-art/art/Main.java | 28 + test/912-classes/src-art/art/Test912.java | 513 + test/912-classes/src-art/art/Test912Art.java | 77 + test/913-heaps/expected.txt | 397 + test/913-heaps/heaps.cc | 1142 ++ test/913-heaps/info.txt | 1 + test/913-heaps/run | 17 + test/913-heaps/src/Main.java | 21 + test/913-heaps/src/art/Main.java | 1 + test/913-heaps/src/art/Test913.java | 761 ++ test/914-hello-obsolescence/expected.txt | 9 + test/914-hello-obsolescence/info.txt | 1 + test/914-hello-obsolescence/run | 17 + test/914-hello-obsolescence/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test914.java | 86 + test/915-obsolete-2/expected.txt | 21 + test/915-obsolete-2/info.txt | 1 + test/915-obsolete-2/run | 17 + test/915-obsolete-2/src/Main.java | 21 + test/915-obsolete-2/src/art/Redefinition.java | 1 + test/915-obsolete-2/src/art/Test915.java | 123 + test/916-obsolete-jit/expected.txt | 21 + test/916-obsolete-jit/info.txt | 1 + test/916-obsolete-jit/run | 17 + test/916-obsolete-jit/src/Main.java | 173 + test/916-obsolete-jit/src/Transform.java | 37 + .../src/art/Redefinition.java | 1 + test/917-fields-transformation/expected.txt | 12 + test/917-fields-transformation/info.txt | 1 + test/917-fields-transformation/run | 17 + test/917-fields-transformation/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test917.java | 97 + test/918-fields/expected.txt | 24 + test/918-fields/fields.cc | 135 + test/918-fields/info.txt | 1 + test/918-fields/run | 17 + test/918-fields/src/Main.java | 21 + test/918-fields/src/art/Test918.java | 
78 + test/919-obsolete-fields/expected.txt | 21 + test/919-obsolete-fields/info.txt | 1 + test/919-obsolete-fields/run | 17 + test/919-obsolete-fields/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + test/919-obsolete-fields/src/art/Test919.java | 173 + test/920-objects/expected.txt | 10 + test/920-objects/info.txt | 1 + test/920-objects/objects.cc | 63 + test/920-objects/run | 17 + test/920-objects/src/Main.java | 21 + test/920-objects/src/art/Test920.java | 98 + test/921-hello-failure/expected.txt | 58 + test/921-hello-failure/info.txt | 7 + test/921-hello-failure/run | 18 + .../src/CommonClassDefinition.java | 27 + .../src/DifferentAccess.java | 57 + test/921-hello-failure/src/FieldChange.java | 61 + test/921-hello-failure/src/Iface1.java | 19 + test/921-hello-failure/src/Iface2.java | 19 + test/921-hello-failure/src/Iface3.java | 19 + test/921-hello-failure/src/Iface4.java | 23 + test/921-hello-failure/src/Main.java | 77 + test/921-hello-failure/src/MethodChange.java | 57 + test/921-hello-failure/src/MissingField.java | 58 + .../src/MissingInterface.java | 58 + test/921-hello-failure/src/MissingMethod.java | 57 + test/921-hello-failure/src/MultiRedef.java | 100 + test/921-hello-failure/src/MultiRetrans.java | 108 + test/921-hello-failure/src/NewField.java | 60 + test/921-hello-failure/src/NewInterface.java | 59 + test/921-hello-failure/src/NewMethod.java | 60 + test/921-hello-failure/src/NewName.java | 56 + .../src/ReorderInterface.java | 59 + test/921-hello-failure/src/Transform.java | 21 + test/921-hello-failure/src/Transform2.java | 21 + test/921-hello-failure/src/Transform3.java | 24 + test/921-hello-failure/src/Transform4.java | 25 + test/921-hello-failure/src/Transform5.java | 21 + test/921-hello-failure/src/Undefault.java | 64 + test/921-hello-failure/src/Unmodifiable.java | 52 + test/921-hello-failure/src/Verification.java | 82 + .../src/art/Redefinition.java | 1 + test/922-properties/expected.txt | 59 + test/922-properties/info.txt | 1 + 
test/922-properties/properties.cc | 95 + test/922-properties/run | 17 + test/922-properties/src/Main.java | 21 + test/922-properties/src/art/Test922.java | 155 + test/923-monitors/expected.txt | 38 + test/923-monitors/info.txt | 1 + test/923-monitors/monitors.cc | 87 + test/923-monitors/run | 17 + test/923-monitors/src/Main.java | 21 + test/923-monitors/src/art/Test923.java | 298 + test/924-threads/expected.txt | 54 + test/924-threads/info.txt | 1 + test/924-threads/run | 17 + test/924-threads/src/Main.java | 21 + test/924-threads/src/art/Test924.java | 494 + test/924-threads/threads.cc | 276 + test/925-threadgroups/expected.txt | 21 + test/925-threadgroups/info.txt | 1 + test/925-threadgroups/run | 17 + test/925-threadgroups/src/Main.java | 21 + test/925-threadgroups/src/art/Test925.java | 164 + test/925-threadgroups/threadgroups.cc | 129 + test/926-multi-obsolescence/expected.txt | 15 + test/926-multi-obsolescence/info.txt | 2 + test/926-multi-obsolescence/run | 17 + test/926-multi-obsolescence/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test926.java | 140 + test/927-timers/expected.txt | 3 + test/927-timers/info.txt | 1 + test/927-timers/run | 17 + test/927-timers/src/Main.java | 21 + test/927-timers/src/art/Test927.java | 62 + test/927-timers/timers.cc | 87 + test/928-jni-table/expected.txt | 1 + test/928-jni-table/info.txt | 5 + test/928-jni-table/jni_table.cc | 117 + test/928-jni-table/run | 17 + test/928-jni-table/src/Main.java | 21 + test/928-jni-table/src/art/Test928.java | 37 + test/929-search/expected.txt | 1 + test/929-search/info.txt | 1 + test/929-search/run | 21 + test/929-search/search.cc | 54 + test/929-search/src-ex/A.java | 18 + test/929-search/src/B.java | 18 + test/929-search/src/Main.java | 52 + test/930-hello-retransform/expected.txt | 2 + test/930-hello-retransform/info.txt | 1 + test/930-hello-retransform/run | 17 + test/930-hello-retransform/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + 
.../src/art/Test930.java | 78 + test/931-agent-thread/agent_thread.cc | 184 + test/931-agent-thread/expected.txt | 1 + test/931-agent-thread/info.txt | 1 + test/931-agent-thread/run | 21 + test/931-agent-thread/src/Main.java | 21 + test/931-agent-thread/src/art/Test931.java | 29 + test/932-transform-saves/expected.txt | 3 + test/932-transform-saves/info.txt | 1 + test/932-transform-saves/run | 17 + test/932-transform-saves/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + test/932-transform-saves/src/art/Test932.java | 129 + test/933-misc-events/expected.txt | 2 + test/933-misc-events/info.txt | 1 + test/933-misc-events/misc_events.cc | 74 + test/933-misc-events/run | 21 + test/933-misc-events/src/Main.java | 21 + test/933-misc-events/src/art/Test933.java | 27 + test/934-load-transform/expected.txt | 1 + test/934-load-transform/info.txt | 1 + test/934-load-transform/run | 17 + test/934-load-transform/src-ex/TestMain.java | 21 + test/934-load-transform/src-ex/Transform.java | 21 + test/934-load-transform/src/Main.java | 92 + .../src/art/Redefinition.java | 1 + test/935-non-retransformable/expected.txt | 6 + test/935-non-retransformable/info.txt | 1 + test/935-non-retransformable/run | 17 + .../src-ex/TestMain.java | 30 + .../src-ex/Transform.java | 21 + test/935-non-retransformable/src/Main.java | 103 + .../src/art/Redefinition.java | 1 + test/936-search-onload/expected.txt | 3 + test/936-search-onload/info.txt | 1 + test/936-search-onload/run | 21 + test/936-search-onload/search_onload.cc | 65 + test/936-search-onload/search_onload.h | 30 + test/936-search-onload/src-ex/A.java | 18 + test/936-search-onload/src/B.java | 18 + test/936-search-onload/src/Main.java | 47 + .../expected.txt | 2 + test/937-hello-retransform-package/info.txt | 1 + test/937-hello-retransform-package/run | 17 + .../src/Main.java | 68 + .../src/Transform.java | 22 + .../src/art/Redefinition.java | 1 + test/938-load-transform-bcp/expected.txt | 2 + test/938-load-transform-bcp/info.txt | 
1 + test/938-load-transform-bcp/run | 17 + .../src-ex/TestMain.java | 35 + test/938-load-transform-bcp/src/Main.java | 117 + .../src/art/Redefinition.java | 1 + .../939-hello-transformation-bcp/expected.txt | 3 + test/939-hello-transformation-bcp/info.txt | 6 + test/939-hello-transformation-bcp/run | 17 + .../src/Main.java | 122 + .../src/art/Redefinition.java | 1 + test/940-recursive-obsolete/expected.txt | 21 + test/940-recursive-obsolete/info.txt | 1 + test/940-recursive-obsolete/run | 17 + test/940-recursive-obsolete/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test940.java | 106 + test/941-recurive-obsolete-jit/expected.txt | 22 + test/941-recurive-obsolete-jit/info.txt | 1 + test/941-recurive-obsolete-jit/run | 17 + test/941-recurive-obsolete-jit/src/Main.java | 152 + .../src/Transform.java | 29 + .../src/art/Redefinition.java | 1 + test/942-private-recursive/expected.txt | 21 + test/942-private-recursive/info.txt | 1 + test/942-private-recursive/run | 17 + test/942-private-recursive/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test942.java | 115 + test/943-private-recursive-jit/expected.txt | 22 + test/943-private-recursive-jit/info.txt | 1 + test/943-private-recursive-jit/run | 17 + test/943-private-recursive-jit/src/Main.java | 168 + .../src/Transform.java | 33 + .../src/art/Redefinition.java | 1 + test/944-transform-classloaders/expected.txt | 5 + test/944-transform-classloaders/info.txt | 7 + test/944-transform-classloaders/run | 17 + test/944-transform-classloaders/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test944.java | 297 + test/945-obsolete-native/expected.txt | 9 + test/945-obsolete-native/info.txt | 1 + test/945-obsolete-native/obsolete_native.cc | 43 + test/945-obsolete-native/run | 17 + test/945-obsolete-native/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + test/945-obsolete-native/src/art/Test945.java | 96 + test/946-obsolete-throw/expected.txt | 15 + 
test/946-obsolete-throw/info.txt | 3 + test/946-obsolete-throw/run | 17 + test/946-obsolete-throw/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + test/946-obsolete-throw/src/art/Test946.java | 95 + test/947-reflect-method/expected.txt | 2 + test/947-reflect-method/info.txt | 4 + test/947-reflect-method/run | 17 + test/947-reflect-method/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + test/947-reflect-method/src/art/Test947.java | 82 + test/948-change-annotations/build | 17 + test/948-change-annotations/expected.txt | 21 + test/948-change-annotations/info.txt | 1 + test/948-change-annotations/run | 17 + .../src/AddAnnotationsTest.java | 70 + .../src/ChangeAnnotationValues.java | 64 + test/948-change-annotations/src/Main.java | 93 + .../src/RemoveAnnotationsTest.java | 55 + test/948-change-annotations/src/TestCase.java | 19 + .../src/TestClassAnnotation1.java | 22 + .../src/TestClassAnnotation2.java | 22 + .../src/TestMethodAnnotation1.java | 22 + .../src/TestMethodAnnotation2.java | 22 + .../948-change-annotations/src/Transform.java | 23 + .../src/art/Redefinition.java | 1 + test/949-in-memory-transform/expected.txt | 2 + test/949-in-memory-transform/info.txt | 4 + test/949-in-memory-transform/run | 17 + test/949-in-memory-transform/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test949.java | 123 + test/950-redefine-intrinsic/expected.txt | 1 + test/950-redefine-intrinsic/info.txt | 3 + test/950-redefine-intrinsic/run | 17 + test/950-redefine-intrinsic/src/Main.java | 467 + .../src/RedefinedLongIntrinsics.java | 70 + .../src/art/Redefinition.java | 1 + test/951-threaded-obsolete/expected.txt | 9 + test/951-threaded-obsolete/info.txt | 4 + test/951-threaded-obsolete/run | 17 + test/951-threaded-obsolete/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test951.java | 108 + test/952-invoke-custom/build | 73 + test/952-invoke-custom/expected.txt | 93 + test/952-invoke-custom/info.txt | 1 + 
test/952-invoke-custom/src/Main.java | 104 + .../src/TestBadBootstrapArguments.java | 583 + test/952-invoke-custom/src/TestBase.java | 95 + .../src/TestDynamicBootstrapArguments.java | 92 + .../src/TestInvocationKinds.java | 218 + ...TestInvokeCustomWithConcurrentThreads.java | 160 + .../src/TestLinkerMethodMinimalArguments.java | 84 + ...TestLinkerMethodMultipleArgumentTypes.java | 117 + .../src/TestLinkerMethodWithRange.java | 169 + .../src/TestLinkerUnrelatedBSM.java | 81 + .../src/TestReturnValues.java | 330 + .../src/TestVariableArityLinkerMethod.java | 569 + test/952-invoke-custom/src/UnrelatedBSM.java | 30 + .../util-src/annotations/BootstrapMethod.java | 45 + .../util-src/annotations/CalledByIndy.java | 44 + .../util-src/annotations/Constant.java | 47 + .../util-src/transformer/IndyTransformer.java | 202 + test/953-invoke-polymorphic-compiler/build | 20 + .../expected.txt | 25 + test/953-invoke-polymorphic-compiler/info.txt | 3 + .../src/Main.java | 374 + test/954-invoke-polymorphic-verifier/build | 20 + test/954-invoke-polymorphic-verifier/check | 19 + .../expected.txt | 16 + test/954-invoke-polymorphic-verifier/info.txt | 3 + .../smali/BadThis.smali | 30 + .../BetterFakeSignaturePolymorphic.smali | 43 + .../smali/FakeSignaturePolymorphic.smali | 43 + .../smali/Main.smali | 91 + .../smali/MethodHandleNotInvoke.smali | 37 + .../smali/MethodHandleToString.smali | 35 + .../smali/NonReference.smali | 30 + .../smali/Subclass.smali | 45 + .../smali/TooFewArguments.smali | 33 + .../smali/TooManyArguments.smali | 35 + .../smali/Unresolved.smali | 40 + .../smali/VarHandleHappyAccessors.smali | 72 + .../smali/VarHandleUnhappyAccessors.smali | 70 + .../smali/VarHandleUnknownAccessor.smali | 37 + test/955-methodhandles-smali/build | 20 + test/955-methodhandles-smali/expected.txt | 9 + test/955-methodhandles-smali/info.txt | 3 + test/955-methodhandles-smali/smali/Main.smali | 241 + test/956-methodhandles/build | 20 + test/956-methodhandles/expected.txt | 39 + 
test/956-methodhandles/info.txt | 3 + test/956-methodhandles/src/Main.java | 1903 ++++ test/956-methodhandles/src/other/Chatty.java | 23 + test/957-methodhandle-transforms/build | 20 + test/957-methodhandle-transforms/expected.txt | 78 + test/957-methodhandle-transforms/info.txt | 3 + .../957-methodhandle-transforms/src/Main.java | 1669 +++ test/958-methodhandle-stackframe/build | 20 + test/958-methodhandle-stackframe/expected.txt | 32 + test/958-methodhandle-stackframe/info.txt | 5 + .../src-art/Main.java | 175 + test/959-invoke-polymorphic-accessors/build | 20 + .../expected.txt | 5 + .../959-invoke-polymorphic-accessors/info.txt | 1 + .../src/Main.java | 1047 ++ test/960-default-smali/build | 23 + test/960-default-smali/expected.txt | 131 + test/960-default-smali/info.txt | 20 + test/960-default-smali/src/A.java | 20 + test/960-default-smali/src/Attendant.java | 24 + test/960-default-smali/src/B.java | 20 + test/960-default-smali/src/C.java | 20 + test/960-default-smali/src/D.java | 20 + test/960-default-smali/src/E.java | 20 + test/960-default-smali/src/Extension.java | 20 + test/960-default-smali/src/F.java | 23 + test/960-default-smali/src/Foo.java | 20 + test/960-default-smali/src/Foo2.java | 25 + test/960-default-smali/src/Foo3.java | 22 + test/960-default-smali/src/Fooer.java | 19 + test/960-default-smali/src/G.java | 20 + test/960-default-smali/src/Greeter.java | 21 + test/960-default-smali/src/Greeter2.java | 20 + test/960-default-smali/src/Greeter3.java | 21 + test/960-default-smali/src/H.java | 16 + test/960-default-smali/src/I.java | 16 + test/960-default-smali/src/J.java | 16 + test/960-default-smali/src/K.java | 17 + test/960-default-smali/src/L.java | 17 + test/960-default-smali/src/M.java | 21 + test/960-default-smali/src/N.java | 21 + test/960-default-smali/src/O.java | 21 + test/960-default-smali/src/P.java | 25 + test/960-default-smali/src/Q.java | 21 + test/960-default-smali/src/classes.xml | 214 + test/960-default-smali/src2/Foo.java | 20 + 
test/960-default-smali/src2/Foo3.java | 25 + test/961-default-iface-resolution-gen/build | 25 + .../expected.txt | 1 + .../961-default-iface-resolution-gen/info.txt | 17 + test/961-default-iface-resolution-gen/run | 18 + .../util-src/generate_java.py | 378 + test/962-iface-static/build | 20 + test/962-iface-static/expected.txt | 3 + test/962-iface-static/info.txt | 4 + test/962-iface-static/src/Displayer.java | 23 + test/962-iface-static/src/Iface.java | 21 + test/962-iface-static/src/Main.java | 20 + test/963-default-range-smali/expected.txt | 2 + test/963-default-range-smali/info.txt | 4 + test/963-default-range-smali/src/A.java | 16 + test/963-default-range-smali/src/Iface.java | 29 + test/963-default-range-smali/src/Main.java | 41 + test/964-default-iface-init-gen/build | 25 + test/964-default-iface-init-gen/expected.txt | 1 + test/964-default-iface-init-gen/info.txt | 17 + .../src/Displayer.java | 24 + .../util-src/generate_java.py | 419 + test/965-default-verify/expected.txt | 15 + test/965-default-verify/info.txt | 8 + test/965-default-verify/src/Iface.java | 23 + test/965-default-verify/src/Main.java | 61 + test/965-default-verify/src/Statics.java | 21 + test/965-default-verify/src2/Statics.java | 20 + test/966-default-conflict/expected.txt | 19 + test/966-default-conflict/info.txt | 6 + test/966-default-conflict/src/Iface.java | 23 + test/966-default-conflict/src/Iface2.java | 25 + test/966-default-conflict/src/Main.java | 80 + test/966-default-conflict/src2/Iface2.java | 20 + test/967-default-ame/expected.txt | 18 + test/967-default-ame/info.txt | 6 + test/967-default-ame/src/Iface.java | 23 + test/967-default-ame/src/Iface2.java | 21 + test/967-default-ame/src/Iface3.java | 19 + test/967-default-ame/src/Main.java | 71 + test/967-default-ame/src2/Iface.java | 23 + test/967-default-ame/src2/Iface2.java | 18 + test/967-default-ame/src2/Iface3.java | 18 + test/968-default-partial-compile-gen/build | 36 + .../expected.txt | 1 + 
test/968-default-partial-compile-gen/info.txt | 17 + .../util-src/generate_java.py | 134 + .../util-src/generate_smali.py | 607 + test/969-iface-super/build | 23 + test/969-iface-super/expected.txt | 47 + test/969-iface-super/info.txt | 6 + test/969-iface-super/src/A.java | 16 + test/969-iface-super/src/B.java | 16 + test/969-iface-super/src/C.java | 20 + test/969-iface-super/src/D.java | 20 + test/969-iface-super/src/E.java | 20 + test/969-iface-super/src/F.java | 20 + test/969-iface-super/src/G.java | 23 + test/969-iface-super/src/H.java | 26 + test/969-iface-super/src/Iface.java | 20 + test/969-iface-super/src/Iface2.java | 20 + test/969-iface-super/src/Iface3.java | 16 + test/969-iface-super/src/classes.xml | 99 + test/970-iface-super-resolution-gen/build | 33 + .../expected.txt | 1 + test/970-iface-super-resolution-gen/info.txt | 17 + .../util-src/generate_java.py | 77 + .../util-src/generate_smali.py | 614 + test/971-iface-super/build | 36 + test/971-iface-super/expected.txt | 1 + test/971-iface-super/info.txt | 17 + .../971-iface-super/util-src/generate_java.py | 138 + .../util-src/generate_smali.py | 689 ++ test/972-default-imt-collision/expected.txt | 0 test/972-default-imt-collision/info.txt | 1 + .../smali/Iface1.smali | 32 + .../smali/Iface2.smali | 277 + .../smali/Klass.smali | 993 ++ test/972-default-imt-collision/src/Main.java | 31 + test/972-iface-super-multidex/expected.txt | 2 + test/972-iface-super-multidex/info.txt | 3 + .../smali-multidex/conflictinterface.smali | 23 + .../smali-multidex/oneconflict.smali | 31 + .../smali-multidex/superinterface.smali | 31 + .../smali-multidex/twoconflict.smali | 31 + .../smali/concreteclass.smali | 62 + test/972-iface-super-multidex/src/Main.java | 55 + test/973-default-multidex/expected.txt | 1 + test/973-default-multidex/info.txt | 5 + .../smali-multidex/iface.smali | 40 + .../smali/concreteclass.smali | 47 + test/973-default-multidex/src/Main.java | 31 + test/974-verify-interface-super/expected.txt | 1 + 
test/974-verify-interface-super/info.txt | 3 + .../smali/base.smali | 31 + .../smali/iface.smali | 22 + .../smali/main.smali | 40 + test/975-iface-private/build | 20 + test/975-iface-private/expected.txt | 4 + test/975-iface-private/info.txt | 5 + test/975-iface-private/smali/Iface.smali | 45 + test/975-iface-private/smali/Main.smali | 71 + test/976-conflict-no-methods/expected.txt | 1 + test/976-conflict-no-methods/info.txt | 1 + .../976-conflict-no-methods/smali/Iface.smali | 281 + test/976-conflict-no-methods/smali/Main.smali | 358 + .../smali/NoMethods.smali | 19 + test/978-virtual-interface/build | 20 + test/978-virtual-interface/expected.txt | 1 + test/978-virtual-interface/info.txt | 7 + test/978-virtual-interface/smali/Iface.smali | 110 + test/978-virtual-interface/smali/Main.smali | 50 + .../978-virtual-interface/smali/Subtype.smali | 40 + test/978-virtual-interface/smali/Target.smali | 40 + test/979-const-method-handle/build | 57 + test/979-const-method-handle/expected.txt | 17 + test/979-const-method-handle/info.txt | 1 + test/979-const-method-handle/src/Main.java | 247 + .../annotations/ConstantMethodHandle.java | 58 + .../annotations/ConstantMethodType.java | 38 + .../transformer/ConstantTransformer.java | 229 + test/980-redefine-object/check | 23 + test/980-redefine-object/expected.txt | 36 + test/980-redefine-object/info.txt | 23 + test/980-redefine-object/redef_object.cc | 143 + test/980-redefine-object/run | 17 + .../src-ex/TestWatcher.java | 75 + test/980-redefine-object/src/Main.java | 154 + test/980-redefine-object/src/Transform.java | 17 + test/981-dedup-original-dex/expected.txt | 0 test/981-dedup-original-dex/info.txt | 4 + test/981-dedup-original-dex/run | 17 + test/981-dedup-original-dex/src-art/Main.java | 21 + .../src-art/art/Redefinition.java | 91 + .../src-art/art/Test981.java | 210 + test/982-ok-no-retransform/expected.txt | 2 + test/982-ok-no-retransform/info.txt | 1 + test/982-ok-no-retransform/run | 17 + 
test/982-ok-no-retransform/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test982.java | 39 + test/983-source-transform-verify/expected.txt | 3 + test/983-source-transform-verify/info.txt | 1 + test/983-source-transform-verify/run | 17 + .../source_transform.cc | 76 + .../source_transform.h | 30 + .../source_transform_art.cc | 73 + .../source_transform_slicer.cc | 36 + .../983-source-transform-verify/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + .../src/art/Test983.java | 46 + test/984-obsolete-invoke/expected.txt | 10 + test/984-obsolete-invoke/info.txt | 4 + test/984-obsolete-invoke/obsolete_invoke.cc | 70 + test/984-obsolete-invoke/run | 17 + test/984-obsolete-invoke/src/Main.java | 21 + .../src/art/Redefinition.java | 1 + test/984-obsolete-invoke/src/art/Test984.java | 121 + test/985-re-obsolete/expected.txt | 35 + test/985-re-obsolete/info.txt | 4 + test/985-re-obsolete/run | 17 + test/985-re-obsolete/src/Main.java | 21 + .../985-re-obsolete/src/art/Redefinition.java | 1 + test/985-re-obsolete/src/art/Test985.java | 197 + test/986-native-method-bind/expected.txt | 11 + test/986-native-method-bind/info.txt | 1 + test/986-native-method-bind/native_bind.cc | 128 + test/986-native-method-bind/run | 17 + test/986-native-method-bind/src/Main.java | 21 + .../src/art/Test986.java | 97 + test/987-agent-bind/agent_bind.cc | 53 + test/987-agent-bind/expected.txt | 2 + test/987-agent-bind/info.txt | 1 + test/987-agent-bind/run | 17 + test/987-agent-bind/src/Main.java | 21 + test/987-agent-bind/src/art/Test987.java | 42 + test/988-method-trace/expected.txt | 533 + test/988-method-trace/gen_srcs.py | 323 + test/988-method-trace/info.txt | 15 + test/988-method-trace/run | 18 + test/988-method-trace/src/Main.java | 21 + test/988-method-trace/src/art/Test988.java | 420 + .../src/art/Test988Intrinsics.java | 135 + test/988-method-trace/src/art/Trace.java | 1 + test/988-method-trace/trace_fib.cc | 41 + test/989-method-trace-throw/expected.txt | 
188 + test/989-method-trace-throw/info.txt | 15 + test/989-method-trace-throw/method_trace.cc | 75 + test/989-method-trace-throw/run | 18 + test/989-method-trace-throw/src/Main.java | 21 + .../src/art/Test989.java | 465 + .../989-method-trace-throw/src/art/Trace.java | 1 + test/990-field-trace/expected.txt | 52 + test/990-field-trace/info.txt | 1 + test/990-field-trace/run | 18 + test/990-field-trace/src/Main.java | 21 + test/990-field-trace/src/art/Test990.java | 232 + test/990-field-trace/src/art/Trace.java | 1 + test/991-field-trace-2/expected.txt | 118 + test/991-field-trace-2/field_trace.cc | 59 + test/991-field-trace-2/info.txt | 5 + test/991-field-trace-2/run | 18 + test/991-field-trace-2/src/Main.java | 21 + test/991-field-trace-2/src/art/Test991.java | 219 + test/991-field-trace-2/src/art/Trace.java | 1 + test/992-source-data/expected.txt | 22 + test/992-source-data/info.txt | 1 + test/992-source-data/run | 17 + test/992-source-data/source_file.cc | 67 + test/992-source-data/src/Main.java | 21 + test/992-source-data/src/art/Target2.java | 19 + test/992-source-data/src/art/Test992.java | 64 + test/993-breakpoints/breakpoints.cc | 121 + test/993-breakpoints/expected.txt | 714 ++ test/993-breakpoints/info.txt | 7 + test/993-breakpoints/run | 18 + test/993-breakpoints/src/Main.java | 21 + test/993-breakpoints/src/art/Breakpoint.java | 1 + test/993-breakpoints/src/art/Test993.java | 652 ++ test/994-breakpoint-line/expected.txt | 34 + test/994-breakpoint-line/info.txt | 5 + test/994-breakpoint-line/run | 18 + test/994-breakpoint-line/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + test/994-breakpoint-line/src/art/Test994.java | 72 + test/995-breakpoints-throw/expected.txt | 34 + test/995-breakpoints-throw/info.txt | 6 + test/995-breakpoints-throw/run | 18 + test/995-breakpoints-throw/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Test995.java | 136 + test/996-breakpoint-obsolete/expected.txt | 14 + test/996-breakpoint-obsolete/info.txt 
| 4 + .../obsolete_breakpoints.cc | 77 + test/996-breakpoint-obsolete/run | 18 + test/996-breakpoint-obsolete/src/Main.java | 21 + .../src/art/Breakpoint.java | 1 + .../src/art/Redefinition.java | 1 + .../src/art/Test996.java | 152 + test/997-single-step/expected.txt | 12 + test/997-single-step/info.txt | 3 + test/997-single-step/run | 18 + test/997-single-step/src/Main.java | 21 + test/997-single-step/src/art/Breakpoint.java | 1 + test/997-single-step/src/art/Test997.java | 82 + test/997-single-step/src/art/Trace.java | 1 + test/998-redefine-use-after-free/expected.txt | 0 test/998-redefine-use-after-free/info.txt | 13 + test/998-redefine-use-after-free/run | 17 + .../src-ex/DexCacheSmash.java | 155 + .../src-ex/art/Redefinition.java | 91 + .../998-redefine-use-after-free/src/Main.java | 54 + test/999-redefine-hiddenapi/build | 17 + test/999-redefine-hiddenapi/expected.txt | 1 + .../hiddenapi-flags.csv | 2 + test/999-redefine-hiddenapi/info.txt | 1 + test/999-redefine-hiddenapi/run | 17 + .../src-ex/Test999.java | 25 + .../src-redefine/art/Test999.java | 25 + .../src-redefine/gen.sh | 32 + test/999-redefine-hiddenapi/src/Main.java | 110 + .../src/art/Redefinition.java | 1 + test/AbstractMethod/AbstractClass.java | 31 + test/AllFields/AllFields.java | 39 + test/AllFields/AllFieldsSub.java | 17 + test/AllFields/AllFieldsUnrelated.java | 17 + test/Android.bp | 1376 +++ test/Android.run-test.mk | 173 + test/DefaultMethods/IterableBase.java | 27 + .../Dex2oatVdexTestDex.java | 83 + test/DexToDexDecompiler/Main.java | 34 + test/ErroneousA/ErroneousA.java | 17 + test/ErroneousB/ErroneousB.java | 20 + test/ErroneousInit/ErroneousInit.java | 23 + test/ExceptionHandle/ExceptionHandle.java | 42 + test/Extension1/ExtensionClass1.java | 20 + test/Extension2/ExtensionClass2.java | 21 + test/ForClassLoaderA/Classes.java | 31 + test/ForClassLoaderB/Classes.java | 30 + test/ForClassLoaderC/Classes.java | 30 + test/ForClassLoaderD/Classes.java | 27 + .../GetMethodSignature.java | 
29 + test/HiddenApi/AbstractPackageClass.java | 19 + test/HiddenApi/Main.java | 26 + test/HiddenApi/PackageClass.java | 19 + test/HiddenApi/PublicInterface.java | 20 + test/HiddenApiSignatures/Class1.java | 35 + test/HiddenApiSignatures/Class12.java | 24 + test/HiddenApiSignatures/Class2.java | 27 + test/HiddenApiSignatures/Class3.java | 27 + test/HiddenApiSignatures/Interface.java | 21 + test/HiddenApiStubs/HiddenApi | 19 + test/HiddenApiStubs/PublicInterface.java | 20 + test/IMTA/Interfaces.java | 24 + test/IMTB/Interfaces.java | 28 + test/ImageLayoutA/ImageLayoutA.java | 21 + test/ImageLayoutB/ImageLayoutB.java | 25 + test/Instrumentation/Instrumentation.java | 35 + test/Interfaces/Interfaces.java | 41 + test/Lookup/A.java | 17 + test/Lookup/AB.java | 17 + test/Lookup/C.java | 17 + test/Main/Main.java | 20 + test/Main/empty.dex | 0 test/ManyMethods/ManyMethods.java | 107 + test/MethodTypes/MethodTypes.java | 20 + test/MultiDex/Main.java | 22 + test/MultiDex/Second.java | 27 + test/MultiDex/main.jpp | 3 + test/MultiDex/main.list | 1 + test/MultiDexModifiedSecondary/Main.java | 22 + test/MultiDexModifiedSecondary/README.txt | 4 + test/MultiDexModifiedSecondary/Second.java | 25 + test/MultiDexModifiedSecondary/main.jpp | 3 + test/MultiDexModifiedSecondary/main.list | 1 + test/MyClass/MyClass.java | 17 + test/MyClassNatives/MyClassNatives.java | 340 + test/Nested/Nested.java | 22 + .../NonStaticLeafMethods.java | 55 + test/Packages/Package1.java | 20 + test/Packages/Package2.java | 20 + test/ProfileTestMultiDex/Main.java | 223 + test/ProfileTestMultiDex/Second.java | 195 + test/ProfileTestMultiDex/main.jpp | 24 + test/ProfileTestMultiDex/main.list | 8 + test/ProtoCompare/ProtoCompare.java | 22 + test/ProtoCompare2/ProtoCompare2.java | 22 + test/README.chroot.md | 130 + test/README.md | 149 + test/StaticLeafMethods/StaticLeafMethods.java | 53 + test/Statics/Statics.java | 55 + test/StaticsFromCode/StaticsFromCode.java | 23 + test/StringLiterals/StringLiterals.java | 
47 + test/Transaction/InstanceFieldsTest.java | 27 + test/Transaction/StaticArrayFieldsTest.java | 27 + test/Transaction/StaticFieldsTest.java | 27 + test/Transaction/Transaction.java | 85 + test/VerifierDeps/Iface.smali | 16 + test/VerifierDeps/Main.smali | 462 + .../MyClassExtendingInterface.smali | 16 + test/VerifierDeps/MyClassWithNoSuper.smali | 16 + .../MyClassWithNoSuperButFailures.smali | 21 + test/VerifierDeps/MyDOMResult.smali | 16 + test/VerifierDeps/MyDocument.smali | 17 + test/VerifierDeps/MyErroneousTimeZone.smali | 22 + test/VerifierDeps/MyResult.smali | 17 + test/VerifierDeps/MySSLSocket.smali | 16 + test/VerifierDeps/MySimpleTimeZone.smali | 24 + .../MySocketTimeoutException.smali | 16 + .../MySub1SoftVerificationFailure.smali | 16 + .../MySub2SoftVerificationFailure.smali | 16 + test/VerifierDeps/MyThread.smali | 16 + test/VerifierDeps/MyThreadSet.smali | 17 + test/VerifierDeps/MyVerificationFailure.smali | 21 + .../VerifierDeps/SocketTimeoutException.smali | 16 + .../MySoftVerificationFailure.smali | 24 + .../ClassToInitialize.smali | 22 + .../VerifySoftFail.smali | 27 + test/XandY/X.java | 17 + test/XandY/Y.java | 21 + test/common/runtime_state.cc | 434 + test/common/stack_inspect.cc | 210 + test/dexdump/all.dex | Bin 0 -> 2572 bytes test/dexdump/all.lst | 21 + test/dexdump/all.txt | 622 ++ test/dexdump/all.xml | 211 + test/dexdump/bytecodes.dex | Bin 0 -> 10288 bytes test/dexdump/bytecodes.lst | 20 + test/dexdump/bytecodes.txt | 1852 +++ test/dexdump/bytecodes.xml | 171 + test/dexdump/checkers.dex | Bin 0 -> 35384 bytes test/dexdump/checkers.lst | 82 + test/dexdump/checkers.txt | 7821 +++++++++++++ test/dexdump/checkers.xml | 676 ++ test/dexdump/const-method-handle.dex | Bin 0 -> 2524 bytes test/dexdump/const-method-handle.lst | 9 + test/dexdump/const-method-handle.txt | 275 + test/dexdump/const-method-handle.xml | 91 + test/dexdump/invoke-custom.dex | Bin 0 -> 31732 bytes test/dexdump/invoke-custom.lst | 145 + test/dexdump/invoke-custom.txt | 
5725 ++++++++++ test/dexdump/invoke-custom.xml | 695 ++ test/dexdump/invoke-polymorphic.dex | Bin 0 -> 1160 bytes test/dexdump/invoke-polymorphic.lst | 3 + test/dexdump/invoke-polymorphic.txt | 109 + test/dexdump/invoke-polymorphic.xml | 33 + test/dexdump/run-all-tests | 125 + test/dexdump/staticfields.dex | Bin 0 -> 1264 bytes test/dexdump/staticfields.lst | 2 + test/dexdump/staticfields.txt | 126 + test/dexdump/staticfields.xml | 130 + test/dexdump/values.dex | Bin 0 -> 1864 bytes test/dexdump/values.lst | 3 + test/dexdump/values.txt | 355 + test/dexdump/values.xml | 561 + test/etc/default-build | 511 + test/etc/default-check | 17 + test/etc/default-run | 17 + test/etc/run-test-jar | 1364 +++ test/jvmti-common/Breakpoint.java | 202 + test/jvmti-common/Exceptions.java | 33 + test/jvmti-common/FramePop.java | 25 + test/jvmti-common/Locals.java | 121 + test/jvmti-common/Main.java | 32 + test/jvmti-common/Monitors.java | 344 + test/jvmti-common/NonStandardExit.java | 50 + test/jvmti-common/Redefinition.java | 114 + test/jvmti-common/StackTrace.java | 68 + test/jvmti-common/SuspendEvents.java | 65 + test/jvmti-common/Suspension.java | 30 + test/jvmti-common/Threads.java | 22 + test/jvmti-common/Trace.java | 68 + test/knownfailures.json | 1378 +++ test/run-test | 1164 ++ test/testrunner/device_config.py | 20 + test/testrunner/env.py | 147 + test/testrunner/run_build_test_target.py | 135 + test/testrunner/target_config.py | 309 + test/testrunner/testrunner.py | 1140 ++ test/ti-agent/agent_common.cc | 42 + test/ti-agent/agent_startup.cc | 39 + test/ti-agent/breakpoint_helper.cc | 205 + test/ti-agent/common_helper.cc | 118 + test/ti-agent/common_helper.h | 35 + test/ti-agent/common_load.cc | 168 + test/ti-agent/early_return_helper.cc | 65 + test/ti-agent/exceptions_helper.cc | 199 + test/ti-agent/frame_pop_helper.cc | 131 + test/ti-agent/jni_binder.cc | 259 + test/ti-agent/jni_binder.h | 42 + test/ti-agent/jni_helper.h | 72 + test/ti-agent/jvmti_helper.cc | 283 + 
test/ti-agent/jvmti_helper.h | 101 + test/ti-agent/locals_helper.cc | 210 + test/ti-agent/monitors_helper.cc | 223 + test/ti-agent/redefinition_helper.cc | 503 + test/ti-agent/scoped_local_ref.h | 66 + test/ti-agent/scoped_primitive_array.h | 154 + test/ti-agent/scoped_utf_chars.h | 92 + test/ti-agent/stack_trace_helper.cc | 99 + test/ti-agent/suspend_event_helper.cc | 803 ++ test/ti-agent/suspend_event_helper.h | 30 + test/ti-agent/suspension_helper.cc | 98 + test/ti-agent/test_env.cc | 34 + test/ti-agent/test_env.h | 36 + test/ti-agent/threads_helper.cc | 41 + test/ti-agent/ti_macros.h | 24 + test/ti-agent/ti_utf.h | 196 + test/ti-agent/trace_helper.cc | 685 ++ test/ti-stress/stress.cc | 911 ++ test/utils/get-device-isa | 72 + test/utils/get-device-test-native-lib-path | 47 + test/utils/python/generate_java_main.py | 313 + test/utils/python/testgen/mixins.py | 141 + test/utils/python/testgen/utils.py | 80 + tools/Android.bp | 38 + tools/add_package_property.sh | 29 + tools/ahat/Android.bp | 61 + tools/ahat/Android.mk | 136 + tools/ahat/README.txt | 140 + tools/ahat/TEST_MAPPING | 7 + tools/ahat/ahat | 20 + tools/ahat/ahat-tests.xml | 23 + tools/ahat/etc/L.hprof | Bin 0 -> 11930828 bytes tools/ahat/etc/O.hprof | Bin 0 -> 5921872 bytes tools/ahat/etc/README.txt | 9 + tools/ahat/etc/RI.hprof | Bin 0 -> 1819716 bytes tools/ahat/etc/ahat-tests.mf | 1 + tools/ahat/etc/ahat.mf | 4 + tools/ahat/etc/ahat_api.txt | 360 + tools/ahat/etc/ahat_api_msg.txt | 5 + tools/ahat/etc/ahat_removed_api.txt | 0 tools/ahat/etc/hprofdump.py | 331 + tools/ahat/etc/style.css | 41 + tools/ahat/etc/test-dump.pro | 14 + .../main/com/android/ahat/AhatHandler.java | 32 + .../com/android/ahat/AhatHttpHandler.java | 56 + .../main/com/android/ahat/AsciiProgress.java | 69 + .../main/com/android/ahat/BitmapHandler.java | 69 + .../src/main/com/android/ahat/Column.java | 58 + tools/ahat/src/main/com/android/ahat/Doc.java | 97 + .../src/main/com/android/ahat/DocString.java | 229 + 
.../main/com/android/ahat/DominatedList.java | 75 + .../src/main/com/android/ahat/HeapTable.java | 163 + .../src/main/com/android/ahat/HtmlDoc.java | 191 + .../main/com/android/ahat/HtmlEscaper.java | 48 + .../ahat/src/main/com/android/ahat/Main.java | 198 + .../ahat/src/main/com/android/ahat/Menu.java | 36 + .../main/com/android/ahat/ObjectHandler.java | 311 + .../main/com/android/ahat/ObjectsHandler.java | 137 + .../com/android/ahat/OverviewHandler.java | 81 + .../ahat/src/main/com/android/ahat/Query.java | 115 + .../main/com/android/ahat/RootedHandler.java | 37 + .../main/com/android/ahat/SiteHandler.java | 114 + .../main/com/android/ahat/SitePrinter.java | 63 + .../src/main/com/android/ahat/SizeTable.java | 106 + .../main/com/android/ahat/StaticHandler.java | 60 + .../main/com/android/ahat/SubsetSelector.java | 109 + .../src/main/com/android/ahat/Summarizer.java | 163 + .../android/ahat/dominators/Dominators.java | 476 + .../dominators/DominatorsComputation.java | 147 + .../ahat/heapdump/AhatArrayInstance.java | 372 + .../ahat/heapdump/AhatClassInstance.java | 465 + .../android/ahat/heapdump/AhatClassObj.java | 160 + .../com/android/ahat/heapdump/AhatHeap.java | 101 + .../android/ahat/heapdump/AhatInstance.java | 808 ++ .../heapdump/AhatPlaceHolderClassObj.java | 75 + .../heapdump/AhatPlaceHolderInstance.java | 76 + .../android/ahat/heapdump/AhatSnapshot.java | 211 + .../main/com/android/ahat/heapdump/Diff.java | 353 + .../com/android/ahat/heapdump/DiffFields.java | 93 + .../com/android/ahat/heapdump/Diffable.java | 47 + .../ahat/heapdump/DiffedFieldValue.java | 151 + .../heapdump/DominatorReferenceIterator.java | 63 + .../main/com/android/ahat/heapdump/Field.java | 43 + .../com/android/ahat/heapdump/FieldValue.java | 50 + .../ahat/heapdump/HprofFormatException.java | 31 + .../com/android/ahat/heapdump/Instances.java | 99 + .../com/android/ahat/heapdump/Parser.java | 1116 ++ .../android/ahat/heapdump/PathElement.java | 77 + 
.../android/ahat/heapdump/Reachability.java | 81 + .../com/android/ahat/heapdump/Reference.java | 37 + .../com/android/ahat/heapdump/RootType.java | 100 + .../main/com/android/ahat/heapdump/Site.java | 458 + .../main/com/android/ahat/heapdump/Size.java | 128 + .../ahat/heapdump/SkipNullsIterator.java | 56 + .../main/com/android/ahat/heapdump/Sort.java | 272 + .../com/android/ahat/heapdump/SuperRoot.java | 60 + .../main/com/android/ahat/heapdump/Type.java | 96 + .../main/com/android/ahat/heapdump/Value.java | 551 + .../android/ahat/progress/NullProgress.java | 28 + .../com/android/ahat/progress/Progress.java | 65 + .../android/ahat/proguard/ProguardMap.java | 451 + tools/ahat/src/ri-test-dump/DumpedStuff.java | 40 + tools/ahat/src/ri-test-dump/Main.java | 43 + tools/ahat/src/test-dump/DumpedStuff.java | 224 + tools/ahat/src/test-dump/Main.java | 58 + .../ahat/src/test-dump/SuperDumpedStuff.java | 36 + .../ahat/src/test-dump/android/os/Binder.java | 46 + .../src/test-dump/android/os/BinderProxy.java | 20 + .../src/test-dump/android/os/IBinder.java | 20 + .../test/com/android/ahat/AhatTestSuite.java | 50 + .../test/com/android/ahat/DiffFieldsTest.java | 193 + .../src/test/com/android/ahat/DiffTest.java | 87 + .../test/com/android/ahat/DominatorsTest.java | 377 + .../com/android/ahat/HtmlEscaperTest.java | 32 + .../test/com/android/ahat/InstanceTest.java | 617 + .../android/ahat/NativeAllocationTest.java | 37 + .../com/android/ahat/ObjectHandlerTest.java | 74 + .../com/android/ahat/ObjectsHandlerTest.java | 55 + .../com/android/ahat/OverviewHandlerTest.java | 36 + .../com/android/ahat/PerformanceTest.java | 57 + .../com/android/ahat/ProguardMapTest.java | 179 + .../src/test/com/android/ahat/QueryTest.java | 73 + .../src/test/com/android/ahat/RiTest.java | 45 + .../com/android/ahat/RootedHandlerTest.java | 30 + .../com/android/ahat/SiteHandlerTest.java | 30 + .../src/test/com/android/ahat/SiteTest.java | 135 + .../src/test/com/android/ahat/TestDump.java | 286 + 
.../test/com/android/ahat/TestHandler.java | 41 + tools/analyze-init-failures.py | 157 + tools/art | 649 ++ tools/art_verifier/Android.bp | 63 + tools/art_verifier/art_verifier.cc | 277 + tools/asan.sh | 21 + tools/bisection_search/README.md | 70 + tools/bisection_search/__init__.py | 17 + tools/bisection_search/bisection_search.py | 434 + tools/bisection_search/bisection_test.py | 99 + tools/boot-image-profile-configure-device.sh | 59 + tools/boot-image-profile-extract-profile.sh | 32 + tools/boot-image-profile-generate.sh | 150 + tools/bootjars.sh | 111 + tools/build/var_cache.py | 146 + tools/build/var_cache.sh | 193 + tools/build/var_list | 38 + tools/build_linux_bionic.sh | 89 + tools/build_linux_bionic_tests.sh | 105 + tools/buildbot-build.sh | 163 + tools/buildbot-cleanup-device.sh | 50 + tools/buildbot-setup-device.sh | 176 + tools/buildbot-symbolize-crashes.sh | 27 + tools/buildbot-sync.sh | 129 + tools/buildbot-teardown-device.sh | 147 + tools/checker/README | 85 + tools/checker/checker.py | 109 + tools/checker/common/__init__.py | 13 + tools/checker/common/archs.py | 15 + tools/checker/common/immutables.py | 25 + tools/checker/common/logger.py | 99 + tools/checker/common/mixins.py | 26 + tools/checker/common/testing.py | 22 + tools/checker/file_format/__init__.py | 13 + .../file_format/c1visualizer/__init__.py | 13 + .../file_format/c1visualizer/parser.py | 90 + .../file_format/c1visualizer/struct.py | 60 + .../checker/file_format/c1visualizer/test.py | 105 + tools/checker/file_format/checker/__init__.py | 13 + tools/checker/file_format/checker/parser.py | 206 + tools/checker/file_format/checker/struct.py | 173 + tools/checker/file_format/checker/test.py | 424 + tools/checker/file_format/common.py | 51 + tools/checker/match/__init__.py | 13 + tools/checker/match/file.py | 193 + tools/checker/match/line.py | 115 + tools/checker/match/test.py | 402 + tools/checker/run_unit_tests.py | 31 + tools/class2greylist/Android.bp | 35 + 
tools/class2greylist/src/class2greylist.mf | 1 + .../AlternativeNotFoundError.java | 20 + .../class2greylist/AnnotatedClassContext.java | 54 + .../AnnotatedMemberContext.java | 59 + .../class2greylist/AnnotationConsumer.java | 18 + .../class2greylist/AnnotationContext.java | 45 + .../class2greylist/AnnotationHandler.java | 27 + .../AnnotationPropertyWriter.java | 71 + .../class2greylist/AnnotationVisitor.java | 96 + .../android/class2greylist/ApiComponents.java | 327 + .../android/class2greylist/ApiResolver.java | 111 + .../class2greylist/Class2Greylist.java | 260 + .../ClassAlternativeNotFoundError.java | 30 + .../CovariantReturnTypeHandler.java | 104 + .../android/class2greylist/ErrorReporter.java | 25 + .../class2greylist/HiddenapiFlagsWriter.java | 40 + .../com/android/class2greylist/JarReader.java | 65 + .../JavadocLinkSyntaxError.java | 31 + .../MemberAlternativeNotFoundError.java | 30 + .../class2greylist/MemberDumpingVisitor.java | 48 + .../MultipleAlternativesFoundError.java | 38 + .../NoAlternativesSpecifiedError.java | 30 + .../class2greylist/PackageAndClassName.java | 67 + .../RepeatedAnnotationHandler.java | 57 + .../RequiredAlternativeNotSpecifiedError.java | 26 + .../class2greylist/SignatureSyntaxError.java | 29 + .../com/android/class2greylist/Status.java | 54 + .../android/class2greylist/StringCursor.java | 131 + .../StringCursorOutOfBoundsException.java | 21 + .../UnsupportedAppUsageAnnotationHandler.java | 181 + tools/class2greylist/test/Android.bp | 29 + tools/class2greylist/test/AndroidTest.xml | 21 + .../AnnotationHandlerTestBase.java | 55 + .../AnnotationPropertyWriterTest.java | 74 + .../class2greylist/ApiComponentsTest.java | 143 + .../class2greylist/ApiResolverTest.java | 159 + .../CovariantReturnTypeHandlerTest.java | 153 + .../RepeatedAnnotationHandlerTest.java | 95 + ...upportedAppUsageAnnotationHandlerTest.java | 581 + tools/common/__init__.py | 17 + tools/common/common.py | 512 + tools/compile-classes.sh | 50 + tools/compile-jar.sh | 
41 + tools/cpp-define-generator/Android.bp | 80 + tools/cpp-define-generator/art_field.def | 24 + tools/cpp-define-generator/art_method.def | 40 + tools/cpp-define-generator/asm_defines.cc | 36 + tools/cpp-define-generator/asm_defines.def | 36 + tools/cpp-define-generator/code_item.def | 28 + tools/cpp-define-generator/globals.def | 78 + tools/cpp-define-generator/lockword.def | 58 + tools/cpp-define-generator/make_header.py | 56 + .../cpp-define-generator/make_header_test.py | 49 + tools/cpp-define-generator/mirror_array.def | 40 + tools/cpp-define-generator/mirror_class.def | 46 + .../cpp-define-generator/mirror_dex_cache.def | 34 + tools/cpp-define-generator/mirror_object.def | 33 + tools/cpp-define-generator/mirror_string.def | 26 + tools/cpp-define-generator/osr.def | 23 + tools/cpp-define-generator/profiling_info.def | 22 + tools/cpp-define-generator/rosalloc.def | 38 + tools/cpp-define-generator/runtime.def | 32 + tools/cpp-define-generator/shadow_frame.def | 42 + tools/cpp-define-generator/thread.def | 75 + tools/dexanalyze/Android.bp | 55 + tools/dexanalyze/dexanalyze.cc | 245 + tools/dexanalyze/dexanalyze_bytecode.cc | 597 + tools/dexanalyze/dexanalyze_bytecode.h | 99 + tools/dexanalyze/dexanalyze_experiments.cc | 621 ++ tools/dexanalyze/dexanalyze_experiments.h | 178 + tools/dexanalyze/dexanalyze_strings.cc | 580 + tools/dexanalyze/dexanalyze_strings.h | 74 + tools/dexanalyze/dexanalyze_test.cc | 59 + tools/dexfuzz/Android.bp | 29 + tools/dexfuzz/Android.mk | 21 + tools/dexfuzz/README | 151 + tools/dexfuzz/dexfuzz | 24 + tools/dexfuzz/manifest.txt | 1 + tools/dexfuzz/src/dexfuzz/DexFuzz.java | 116 + .../dexfuzz/src/dexfuzz/ExecutionResult.java | 122 + tools/dexfuzz/src/dexfuzz/Log.java | 78 + tools/dexfuzz/src/dexfuzz/MutationStats.java | 74 + tools/dexfuzz/src/dexfuzz/Options.java | 437 + tools/dexfuzz/src/dexfuzz/StreamConsumer.java | 202 + tools/dexfuzz/src/dexfuzz/Timer.java | 70 + .../src/dexfuzz/executors/Architecture.java | 37 + 
.../executors/Arm64InterpreterExecutor.java | 39 + .../Arm64OptimizingBackendExecutor.java | 42 + .../executors/ArmInterpreterExecutor.java | 39 + .../ArmOptimizingBackendExecutor.java | 42 + .../dexfuzz/src/dexfuzz/executors/Device.java | 295 + .../src/dexfuzz/executors/Executor.java | 248 + .../executors/X86InterpreterExecutor.java | 40 + .../X86OptimizingBackendExecutor.java | 43 + .../executors/X86_64InterpreterExecutor.java | 36 + .../X86_64OptimizingBackendExecutor.java | 39 + tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java | 421 + .../src/dexfuzz/fuzzers/FuzzerMultiple.java | 43 + .../fuzzers/FuzzerMultipleExecute.java | 51 + .../fuzzers/FuzzerMultipleNoExecute.java | 46 + .../src/dexfuzz/fuzzers/FuzzerSingle.java | 38 + .../dexfuzz/fuzzers/FuzzerSingleExecute.java | 38 + .../fuzzers/FuzzerSingleNoExecute.java | 33 + .../src/dexfuzz/listeners/BaseListener.java | 77 + .../listeners/BisectionSearchListener.java | 108 + .../listeners/ConsoleLoggerListener.java | 163 + .../listeners/FinalStatusListener.java | 52 + .../dexfuzz/listeners/LogFileListener.java | 277 + .../listeners/MultiplexerListener.java | 205 + .../UniqueProgramTrackerListener.java | 259 + .../listeners/UpdatingConsoleListener.java | 108 + .../src/dexfuzz/program/CodeTranslator.java | 600 + .../src/dexfuzz/program/IdCreator.java | 804 ++ .../src/dexfuzz/program/MBranchInsn.java | 37 + tools/dexfuzz/src/dexfuzz/program/MInsn.java | 64 + .../src/dexfuzz/program/MInsnWithData.java | 37 + .../src/dexfuzz/program/MSwitchInsn.java | 50 + .../src/dexfuzz/program/MTryBlock.java | 29 + .../src/dexfuzz/program/MutatableCode.java | 410 + .../dexfuzz/src/dexfuzz/program/Mutation.java | 59 + .../dexfuzz/program/MutationSerializer.java | 95 + .../dexfuzz/src/dexfuzz/program/Program.java | 656 ++ .../program/mutators/ArithOpChanger.java | 290 + .../program/mutators/BranchShifter.java | 170 + .../program/mutators/CmpBiasChanger.java | 154 + .../dexfuzz/program/mutators/CodeMutator.java | 136 + 
.../mutators/ConstantValueChanger.java | 149 + .../program/mutators/ConversionRepeater.java | 200 + .../program/mutators/FieldFlagChanger.java | 167 + .../program/mutators/IfBranchChanger.java | 158 + .../program/mutators/InstructionDeleter.java | 138 + .../mutators/InstructionDuplicator.java | 104 + .../program/mutators/InstructionSwapper.java | 159 + .../program/mutators/InvokeChanger.java | 178 + .../mutators/NewArrayLengthChanger.java | 159 + .../program/mutators/NewInstanceChanger.java | 218 + .../program/mutators/NewMethodCaller.java | 186 + .../mutators/NonsenseStringPrinter.java | 162 + .../mutators/OppositeBranchChanger.java | 72 + .../program/mutators/PoolIndexChanger.java | 199 + .../program/mutators/RandomBranchChanger.java | 70 + .../mutators/RandomInstructionGenerator.java | 279 + .../program/mutators/RegisterClobber.java | 102 + .../program/mutators/SwitchBranchShifter.java | 175 + .../program/mutators/TryBlockShifter.java | 211 + .../dexfuzz/program/mutators/VRegChanger.java | 195 + .../program/mutators/ValuePrinter.java | 266 + .../src/dexfuzz/rawdex/AnnotationElement.java | 44 + .../src/dexfuzz/rawdex/AnnotationItem.java | 43 + .../src/dexfuzz/rawdex/AnnotationOffItem.java | 38 + .../src/dexfuzz/rawdex/AnnotationSetItem.java | 50 + .../dexfuzz/rawdex/AnnotationSetRefItem.java | 38 + .../dexfuzz/rawdex/AnnotationSetRefList.java | 50 + .../rawdex/AnnotationsDirectoryItem.java | 101 + .../src/dexfuzz/rawdex/ClassDataItem.java | 142 + .../src/dexfuzz/rawdex/ClassDefItem.java | 77 + .../dexfuzz/src/dexfuzz/rawdex/CodeItem.java | 205 + .../src/dexfuzz/rawdex/DebugInfoItem.java | 54 + .../dexfuzz/rawdex/DexRandomAccessFile.java | 319 + .../src/dexfuzz/rawdex/EncodedAnnotation.java | 61 + .../src/dexfuzz/rawdex/EncodedArray.java | 54 + .../src/dexfuzz/rawdex/EncodedArrayItem.java | 40 + .../dexfuzz/rawdex/EncodedCatchHandler.java | 62 + .../rawdex/EncodedCatchHandlerList.java | 48 + .../src/dexfuzz/rawdex/EncodedField.java | 79 + 
.../src/dexfuzz/rawdex/EncodedMethod.java | 95 + .../dexfuzz/rawdex/EncodedTypeAddrPair.java | 43 + .../src/dexfuzz/rawdex/EncodedValue.java | 98 + .../src/dexfuzz/rawdex/FieldAnnotation.java | 43 + .../src/dexfuzz/rawdex/FieldIdItem.java | 54 + .../src/dexfuzz/rawdex/HeaderItem.java | 128 + .../src/dexfuzz/rawdex/Instruction.java | 584 + tools/dexfuzz/src/dexfuzz/rawdex/MapItem.java | 69 + tools/dexfuzz/src/dexfuzz/rawdex/MapList.java | 223 + .../src/dexfuzz/rawdex/MethodAnnotation.java | 43 + .../src/dexfuzz/rawdex/MethodIdItem.java | 54 + tools/dexfuzz/src/dexfuzz/rawdex/Offset.java | 191 + .../src/dexfuzz/rawdex/OffsetTracker.java | 513 + .../src/dexfuzz/rawdex/Offsettable.java | 117 + tools/dexfuzz/src/dexfuzz/rawdex/Opcode.java | 280 + .../src/dexfuzz/rawdex/OpcodeInfo.java | 41 + .../dexfuzz/rawdex/ParameterAnnotation.java | 43 + .../src/dexfuzz/rawdex/ProtoIdItem.java | 51 + .../src/dexfuzz/rawdex/RawDexFile.java | 390 + .../src/dexfuzz/rawdex/RawDexObject.java | 53 + .../src/dexfuzz/rawdex/StringDataItem.java | 88 + .../src/dexfuzz/rawdex/StringIdItem.java | 40 + tools/dexfuzz/src/dexfuzz/rawdex/TryItem.java | 44 + .../src/dexfuzz/rawdex/TypeIdItem.java | 42 + .../dexfuzz/src/dexfuzz/rawdex/TypeItem.java | 40 + .../dexfuzz/src/dexfuzz/rawdex/TypeList.java | 71 + .../rawdex/formats/AbstractFormat.java | 72 + .../dexfuzz/rawdex/formats/ContainsConst.java | 32 + .../rawdex/formats/ContainsPoolIndex.java | 41 + .../rawdex/formats/ContainsTarget.java | 29 + .../dexfuzz/rawdex/formats/ContainsVRegs.java | 25 + .../src/dexfuzz/rawdex/formats/Format00x.java | 54 + .../src/dexfuzz/rawdex/formats/Format1.java | 54 + .../src/dexfuzz/rawdex/formats/Format10t.java | 56 + .../src/dexfuzz/rawdex/formats/Format10x.java | 46 + .../src/dexfuzz/rawdex/formats/Format11n.java | 66 + .../src/dexfuzz/rawdex/formats/Format11x.java | 51 + .../src/dexfuzz/rawdex/formats/Format12x.java | 51 + .../src/dexfuzz/rawdex/formats/Format2.java | 54 + 
.../dexfuzz/rawdex/formats/Format20bc.java | 51 + .../src/dexfuzz/rawdex/formats/Format20t.java | 57 + .../src/dexfuzz/rawdex/formats/Format21c.java | 78 + .../src/dexfuzz/rawdex/formats/Format21h.java | 67 + .../src/dexfuzz/rawdex/formats/Format21s.java | 67 + .../src/dexfuzz/rawdex/formats/Format21t.java | 62 + .../src/dexfuzz/rawdex/formats/Format22b.java | 68 + .../src/dexfuzz/rawdex/formats/Format22c.java | 72 + .../dexfuzz/rawdex/formats/Format22cs.java | 72 + .../src/dexfuzz/rawdex/formats/Format22s.java | 67 + .../src/dexfuzz/rawdex/formats/Format22t.java | 62 + .../src/dexfuzz/rawdex/formats/Format22x.java | 52 + .../src/dexfuzz/rawdex/formats/Format23x.java | 53 + .../src/dexfuzz/rawdex/formats/Format3.java | 54 + .../src/dexfuzz/rawdex/formats/Format30t.java | 57 + .../src/dexfuzz/rawdex/formats/Format31c.java | 68 + .../src/dexfuzz/rawdex/formats/Format31i.java | 67 + .../src/dexfuzz/rawdex/formats/Format31t.java | 62 + .../src/dexfuzz/rawdex/formats/Format32x.java | 53 + .../src/dexfuzz/rawdex/formats/Format35c.java | 75 + .../dexfuzz/rawdex/formats/Format35mi.java | 55 + .../dexfuzz/rawdex/formats/Format35ms.java | 55 + .../src/dexfuzz/rawdex/formats/Format3rc.java | 68 + .../dexfuzz/rawdex/formats/Format3rmi.java | 48 + .../dexfuzz/rawdex/formats/Format3rms.java | 48 + .../src/dexfuzz/rawdex/formats/Format5.java | 54 + .../src/dexfuzz/rawdex/formats/Format51l.java | 70 + .../dexfuzz/rawdex/formats/RawInsnHelper.java | 119 + tools/dist_linux_bionic.sh | 44 + tools/dmtracedump/Android.bp | 46 + tools/dmtracedump/createtesttrace.cc | 449 + tools/dmtracedump/dmtracedump.pl | 18 + tools/dmtracedump/dumpdir.sh | 11 + tools/dmtracedump/profile.h | 43 + tools/dmtracedump/tracedump.cc | 2620 +++++ tools/dt_fds_forward.py | 197 + tools/external_oj_libjdwp_art_failures.txt | 58 + tools/extract-embedded-java | 35 + tools/findbuildbotwarnings.py | 96 + tools/generate_cmake_lists.py | 98 + tools/generate_operator_out.py | 228 + tools/golem/build-target.sh | 378 + 
tools/golem/env | 117 + tools/hiddenapi/Android.bp | 69 + tools/hiddenapi/README.md | 54 + tools/hiddenapi/find_api_violations.pl | 133 + tools/hiddenapi/hiddenapi.cc | 1125 ++ tools/hiddenapi/hiddenapi_test.cc | 754 ++ tools/host_bcp.sh | 92 + tools/javac-helper.sh | 105 + tools/jfuzz/Android.bp | 29 + tools/jfuzz/README.md | 130 + tools/jfuzz/__init__.py | 17 + tools/jfuzz/jfuzz.cc | 1361 +++ tools/jfuzz/run_dex_fuzz_test.py | 193 + tools/jfuzz/run_jfuzz_test.py | 647 ++ tools/jfuzz/run_jfuzz_test_nightly.py | 95 + tools/jvmti-agents/README.md | 16 + .../jvmti-agents/breakpoint-logger/Android.bp | 57 + .../jvmti-agents/breakpoint-logger/README.md | 54 + .../breakpoint-logger/breakpoint_logger.cc | 447 + tools/jvmti-agents/chain-agents/Android.bp | 75 + tools/jvmti-agents/chain-agents/README.md | 36 + .../jvmti-agents/chain-agents/chainagents.cc | 136 + .../jvmti-agents/dump-jvmti-state/Android.bp | 47 + tools/jvmti-agents/dump-jvmti-state/README.md | 27 + .../dump-jvmti-state/dump-jvmti.cc | 115 + tools/jvmti-agents/field-counts/Android.bp | 73 + tools/jvmti-agents/field-counts/README.md | 64 + .../jvmti-agents/field-counts/count-fields.py | 167 + tools/jvmti-agents/field-counts/fieldcount.cc | 274 + .../field-null-percent/Android.bp | 71 + .../jvmti-agents/field-null-percent/README.md | 51 + .../field-null-percent/check-null-fields.py | 149 + .../field-null-percent/fieldnull.cc | 218 + tools/jvmti-agents/jit-load/Android.bp | 76 + tools/jvmti-agents/jit-load/README.md | 35 + tools/jvmti-agents/jit-load/jitload.cc | 144 + tools/jvmti-agents/list-extensions/Android.bp | 47 + tools/jvmti-agents/list-extensions/README.md | 56 + .../list-extensions/list-extensions.cc | 170 + .../simple-force-redefine/Android.bp | 68 + .../simple-force-redefine/README.md | 33 + .../simple-force-redefine/forceredefine.cc | 317 + tools/jvmti-agents/ti-alloc-sample/Android.bp | 73 + tools/jvmti-agents/ti-alloc-sample/README.md | 79 + tools/jvmti-agents/ti-alloc-sample/mkflame.py | 213 + 
.../ti-alloc-sample/ti_alloc_sample.cc | 461 + tools/jvmti-agents/ti-fast/Android.bp | 73 + tools/jvmti-agents/ti-fast/README.md | 112 + tools/jvmti-agents/ti-fast/tifast.cc | 736 ++ tools/jvmti-agents/titrace/Android.bp | 70 + tools/jvmti-agents/titrace/README.md | 62 + .../titrace/instruction_decoder.cc | 519 + .../titrace/instruction_decoder.h | 42 + tools/jvmti-agents/titrace/titrace.cc | 314 + .../wrapagentproperties/Android.bp | 57 + .../wrapagentproperties/README.md | 30 + .../wrapagentproperties.cc | 346 + tools/libcore_failures.txt | 276 + tools/libcore_fugu_failures.txt | 131 + tools/libcore_gcstress_debug_failures.txt | 64 + tools/libcore_gcstress_failures.txt | 41 + tools/libjavac/Android.bp | 27 + .../libjavac/src/com/android/javac/Javac.java | 126 + tools/libjdwp-compat.props | 18 + tools/luci/config/cr-buildbucket.cfg | 135 + tools/luci/config/luci-logdog.cfg | 18 + tools/luci/config/luci-milo.cfg | 146 + tools/luci/config/luci-notify.cfg | 126 + .../email-templates/default.template | 37 + tools/luci/config/luci-scheduler.cfg | 399 + tools/luci/config/project.cfg | 4 + tools/parallel_run.py | 79 + tools/prebuilt_libjdwp_art_failures.txt | 110 + tools/public.libraries.buildbot.txt | 6 + tools/run-gtests.sh | 68 + tools/run-jdwp-tests.sh | 456 + tools/run-libcore-tests.sh | 322 + tools/run-libjdwp-tests.sh | 148 + tools/run-prebuilt-libjdwp-tests.sh | 112 + tools/runtime_memusage/README | 100 + .../prune_sanitizer_output.py | 156 + .../sanitizer_logcat_analysis.sh | 374 + tools/runtime_memusage/symbol_trace_info.py | 306 + tools/signal_dumper/Android.bp | 68 + tools/signal_dumper/signal_dumper.cc | 726 ++ tools/stream-trace-converter.py | 210 + tools/symbolize.sh | 72 + tools/test_presubmit.py | 159 + tools/tracefast-plugin/Android.bp | 94 + tools/tracefast-plugin/tracefast.cc | 173 + tools/veridex/Android.bp | 55 + tools/veridex/Android.mk | 71 + tools/veridex/README.md | 14 + tools/veridex/api_list_filter.h | 74 + tools/veridex/appcompat.sh | 81 + 
tools/veridex/class_filter.h | 48 + tools/veridex/flow_analysis.cc | 799 ++ tools/veridex/flow_analysis.h | 228 + tools/veridex/hidden_api.cc | 94 + tools/veridex/hidden_api.h | 121 + tools/veridex/hidden_api_finder.cc | 262 + tools/veridex/hidden_api_finder.h | 64 + tools/veridex/precise_hidden_api_finder.cc | 123 + tools/veridex/precise_hidden_api_finder.h | 67 + tools/veridex/resolver.cc | 314 + tools/veridex/resolver.h | 100 + tools/veridex/veridex.cc | 347 + tools/veridex/veridex.h | 116 + tools/wrap-logcat.py | 64 + 7117 files changed, 1030084 insertions(+) create mode 100644 .gitignore create mode 100644 Android.bp create mode 100644 Android.mk create mode 100644 CPPLINT.cfg create mode 100644 CleanSpec.mk create mode 100644 MODULE_LICENSE_APACHE2 create mode 100644 NOTICE create mode 100644 OWNERS create mode 100644 PREUPLOAD.cfg create mode 100644 TEST_MAPPING create mode 100644 adbconnection/Android.bp create mode 100644 adbconnection/adbconnection.cc create mode 100644 adbconnection/adbconnection.h create mode 100644 benchmark/Android.bp create mode 100644 benchmark/const-class/info.txt create mode 100644 benchmark/const-class/src/ConstClassBenchmark.java create mode 100644 benchmark/const-string/info.txt create mode 100644 benchmark/const-string/src/ConstStringBenchmark.java create mode 100644 benchmark/jni-perf/info.txt create mode 100644 benchmark/jni-perf/perf_jni.cc create mode 100644 benchmark/jni-perf/src/JniPerfBenchmark.java create mode 100644 benchmark/jni_loader.cc create mode 100644 benchmark/jobject-benchmark/info.txt create mode 100644 benchmark/jobject-benchmark/jobject_benchmark.cc create mode 100644 benchmark/jobject-benchmark/src/JObjectBenchmark.java create mode 100644 benchmark/micro-native/micro_native.cc create mode 100644 benchmark/scoped-primitive-array/info.txt create mode 100644 benchmark/scoped-primitive-array/scoped_primitive_array.cc create mode 100644 benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java 
create mode 100644 benchmark/string-indexof/info.txt create mode 100644 benchmark/string-indexof/src/StringIndexOfBenchmark.java create mode 100644 benchmark/stringbuilder-append/info.txt create mode 100644 benchmark/stringbuilder-append/src/StringBuilderAppendBenchmark.java create mode 100644 benchmark/type-check/info.txt create mode 100644 benchmark/type-check/src/TypeCheckBenchmark.java create mode 100644 build/Android.bp create mode 100644 build/Android.common.mk create mode 100644 build/Android.common_build.mk create mode 100644 build/Android.common_path.mk create mode 100644 build/Android.common_test.mk create mode 100644 build/Android.cpplint.mk create mode 100644 build/Android.gtest.mk create mode 100644 build/Android.oat.mk create mode 100644 build/apex/Android.bp create mode 100644 build/apex/art_apex_boot_integrity.rc create mode 100644 build/apex/art_apex_boot_integrity.sh create mode 100755 build/apex/art_apex_test.py create mode 100644 build/apex/com.android.art.avbpubkey create mode 100644 build/apex/com.android.art.pem create mode 100644 build/apex/com.android.art.pk8 create mode 100644 build/apex/com.android.art.x509.pem create mode 100644 build/apex/ld.config.txt create mode 100644 build/apex/manifest-art.json create mode 100755 build/apex/runtests.sh create mode 100644 build/art.go create mode 100644 build/codegen.go create mode 100644 build/makevars.go create mode 100644 build/sdk/Android.bp create mode 100644 cmdline/Android.bp create mode 100644 cmdline/README.md create mode 100644 cmdline/cmdline.h create mode 100644 cmdline/cmdline_parse_result.h create mode 100644 cmdline/cmdline_parser.h create mode 100644 cmdline/cmdline_parser_test.cc create mode 100644 cmdline/cmdline_result.h create mode 100644 cmdline/cmdline_type_parser.h create mode 100644 cmdline/cmdline_types.h create mode 100644 cmdline/detail/cmdline_debug_detail.h create mode 100644 cmdline/detail/cmdline_parse_argument_detail.h create mode 100644 
cmdline/detail/cmdline_parser_detail.h create mode 100644 cmdline/memory_representation.h create mode 100644 cmdline/token_range.h create mode 100644 cmdline/unit.h create mode 100644 compiler/Android.bp create mode 100644 compiler/cfi_test.h create mode 100644 compiler/common_compiler_test.cc create mode 100644 compiler/common_compiler_test.h create mode 100644 compiler/compiled_method-inl.h create mode 100644 compiler/compiled_method.cc create mode 100644 compiler/compiled_method.h create mode 100644 compiler/compiler.cc create mode 100644 compiler/compiler.h create mode 100644 compiler/debug/debug_info.h create mode 100644 compiler/debug/dwarf/dwarf_test.cc create mode 100644 compiler/debug/dwarf/dwarf_test.h create mode 100644 compiler/debug/elf_compilation_unit.h create mode 100644 compiler/debug/elf_debug_frame_writer.h create mode 100644 compiler/debug/elf_debug_info_writer.h create mode 100644 compiler/debug/elf_debug_line_writer.h create mode 100644 compiler/debug/elf_debug_loc_writer.h create mode 100644 compiler/debug/elf_debug_writer.cc create mode 100644 compiler/debug/elf_debug_writer.h create mode 100644 compiler/debug/elf_symtab_writer.h create mode 100644 compiler/debug/method_debug_info.h create mode 100644 compiler/debug/src_map_elem.h create mode 100644 compiler/debug/src_map_elem_test.cc create mode 100644 compiler/dex/inline_method_analyser.cc create mode 100644 compiler/dex/inline_method_analyser.h create mode 100644 compiler/dex/verification_results.cc create mode 100644 compiler/dex/verification_results.h create mode 100644 compiler/dex/verified_method.cc create mode 100644 compiler/dex/verified_method.h create mode 100644 compiler/driver/compiled_method_storage.cc create mode 100644 compiler/driver/compiled_method_storage.h create mode 100644 compiler/driver/compiled_method_storage_test.cc create mode 100644 compiler/driver/compiler_options.cc create mode 100644 compiler/driver/compiler_options.h create mode 100644 
compiler/driver/compiler_options_map-inl.h create mode 100644 compiler/driver/compiler_options_map-storage.h create mode 100644 compiler/driver/compiler_options_map.def create mode 100644 compiler/driver/compiler_options_map.h create mode 100644 compiler/driver/dex_compilation_unit.cc create mode 100644 compiler/driver/dex_compilation_unit.h create mode 100644 compiler/driver/simple_compiler_options_map.h create mode 100644 compiler/exception_test.cc create mode 100644 compiler/jit/jit_compiler.cc create mode 100644 compiler/jit/jit_compiler.h create mode 100644 compiler/jit/jit_logger.cc create mode 100644 compiler/jit/jit_logger.h create mode 100644 compiler/jni/jni_cfi_test.cc create mode 100644 compiler/jni/jni_cfi_test_expected.inc create mode 100644 compiler/jni/jni_compiler_test.cc create mode 100644 compiler/jni/quick/arm/calling_convention_arm.cc create mode 100644 compiler/jni/quick/arm/calling_convention_arm.h create mode 100644 compiler/jni/quick/arm64/calling_convention_arm64.cc create mode 100644 compiler/jni/quick/arm64/calling_convention_arm64.h create mode 100644 compiler/jni/quick/calling_convention.cc create mode 100644 compiler/jni/quick/calling_convention.h create mode 100644 compiler/jni/quick/jni_compiler.cc create mode 100644 compiler/jni/quick/jni_compiler.h create mode 100644 compiler/jni/quick/x86/calling_convention_x86.cc create mode 100644 compiler/jni/quick/x86/calling_convention_x86.h create mode 100644 compiler/jni/quick/x86_64/calling_convention_x86_64.cc create mode 100644 compiler/jni/quick/x86_64/calling_convention_x86_64.h create mode 100644 compiler/linker/linker_patch.h create mode 100644 compiler/linker/linker_patch_test.cc create mode 100644 compiler/linker/output_stream_test.cc create mode 100644 compiler/optimizing/block_builder.cc create mode 100644 compiler/optimizing/block_builder.h create mode 100644 compiler/optimizing/bounds_check_elimination.cc create mode 100644 compiler/optimizing/bounds_check_elimination.h create 
mode 100644 compiler/optimizing/bounds_check_elimination_test.cc create mode 100644 compiler/optimizing/builder.cc create mode 100644 compiler/optimizing/builder.h create mode 100644 compiler/optimizing/cha_guard_optimization.cc create mode 100644 compiler/optimizing/cha_guard_optimization.h create mode 100644 compiler/optimizing/code_generator.cc create mode 100644 compiler/optimizing/code_generator.h create mode 100644 compiler/optimizing/code_generator_arm64.cc create mode 100644 compiler/optimizing/code_generator_arm64.h create mode 100644 compiler/optimizing/code_generator_arm_vixl.cc create mode 100644 compiler/optimizing/code_generator_arm_vixl.h create mode 100644 compiler/optimizing/code_generator_utils.cc create mode 100644 compiler/optimizing/code_generator_utils.h create mode 100644 compiler/optimizing/code_generator_vector_arm64.cc create mode 100644 compiler/optimizing/code_generator_vector_arm_vixl.cc create mode 100644 compiler/optimizing/code_generator_vector_x86.cc create mode 100644 compiler/optimizing/code_generator_vector_x86_64.cc create mode 100644 compiler/optimizing/code_generator_x86.cc create mode 100644 compiler/optimizing/code_generator_x86.h create mode 100644 compiler/optimizing/code_generator_x86_64.cc create mode 100644 compiler/optimizing/code_generator_x86_64.h create mode 100644 compiler/optimizing/code_sinking.cc create mode 100644 compiler/optimizing/code_sinking.h create mode 100644 compiler/optimizing/codegen_test.cc create mode 100644 compiler/optimizing/codegen_test_utils.h create mode 100644 compiler/optimizing/common_arm.h create mode 100644 compiler/optimizing/common_arm64.h create mode 100644 compiler/optimizing/common_dominator.h create mode 100644 compiler/optimizing/constant_folding.cc create mode 100644 compiler/optimizing/constant_folding.h create mode 100644 compiler/optimizing/constant_folding_test.cc create mode 100644 compiler/optimizing/constructor_fence_redundancy_elimination.cc create mode 100644 
compiler/optimizing/constructor_fence_redundancy_elimination.h create mode 100644 compiler/optimizing/data_type-inl.h create mode 100644 compiler/optimizing/data_type.cc create mode 100644 compiler/optimizing/data_type.h create mode 100644 compiler/optimizing/data_type_test.cc create mode 100644 compiler/optimizing/dead_code_elimination.cc create mode 100644 compiler/optimizing/dead_code_elimination.h create mode 100644 compiler/optimizing/dead_code_elimination_test.cc create mode 100644 compiler/optimizing/dominator_test.cc create mode 100644 compiler/optimizing/escape.cc create mode 100644 compiler/optimizing/escape.h create mode 100644 compiler/optimizing/find_loops_test.cc create mode 100644 compiler/optimizing/graph_checker.cc create mode 100644 compiler/optimizing/graph_checker.h create mode 100644 compiler/optimizing/graph_checker_test.cc create mode 100644 compiler/optimizing/graph_test.cc create mode 100644 compiler/optimizing/graph_visualizer.cc create mode 100644 compiler/optimizing/graph_visualizer.h create mode 100644 compiler/optimizing/gvn.cc create mode 100644 compiler/optimizing/gvn.h create mode 100644 compiler/optimizing/gvn_test.cc create mode 100644 compiler/optimizing/induction_var_analysis.cc create mode 100644 compiler/optimizing/induction_var_analysis.h create mode 100644 compiler/optimizing/induction_var_analysis_test.cc create mode 100644 compiler/optimizing/induction_var_range.cc create mode 100644 compiler/optimizing/induction_var_range.h create mode 100644 compiler/optimizing/induction_var_range_test.cc create mode 100644 compiler/optimizing/inliner.cc create mode 100644 compiler/optimizing/inliner.h create mode 100644 compiler/optimizing/instruction_builder.cc create mode 100644 compiler/optimizing/instruction_builder.h create mode 100644 compiler/optimizing/instruction_simplifier.cc create mode 100644 compiler/optimizing/instruction_simplifier.h create mode 100644 compiler/optimizing/instruction_simplifier_arm.cc create mode 100644 
compiler/optimizing/instruction_simplifier_arm.h create mode 100644 compiler/optimizing/instruction_simplifier_arm64.cc create mode 100644 compiler/optimizing/instruction_simplifier_arm64.h create mode 100644 compiler/optimizing/instruction_simplifier_shared.cc create mode 100644 compiler/optimizing/instruction_simplifier_shared.h create mode 100644 compiler/optimizing/instruction_simplifier_x86.cc create mode 100644 compiler/optimizing/instruction_simplifier_x86.h create mode 100644 compiler/optimizing/instruction_simplifier_x86_64.cc create mode 100644 compiler/optimizing/instruction_simplifier_x86_64.h create mode 100644 compiler/optimizing/instruction_simplifier_x86_shared.cc create mode 100644 compiler/optimizing/instruction_simplifier_x86_shared.h create mode 100644 compiler/optimizing/intrinsic_objects.cc create mode 100644 compiler/optimizing/intrinsic_objects.h create mode 100644 compiler/optimizing/intrinsics.cc create mode 100644 compiler/optimizing/intrinsics.h create mode 100644 compiler/optimizing/intrinsics_arm64.cc create mode 100644 compiler/optimizing/intrinsics_arm64.h create mode 100644 compiler/optimizing/intrinsics_arm_vixl.cc create mode 100644 compiler/optimizing/intrinsics_arm_vixl.h create mode 100644 compiler/optimizing/intrinsics_utils.h create mode 100644 compiler/optimizing/intrinsics_x86.cc create mode 100644 compiler/optimizing/intrinsics_x86.h create mode 100644 compiler/optimizing/intrinsics_x86_64.cc create mode 100644 compiler/optimizing/intrinsics_x86_64.h create mode 100644 compiler/optimizing/licm.cc create mode 100644 compiler/optimizing/licm.h create mode 100644 compiler/optimizing/licm_test.cc create mode 100644 compiler/optimizing/linear_order.cc create mode 100644 compiler/optimizing/linear_order.h create mode 100644 compiler/optimizing/linearize_test.cc create mode 100644 compiler/optimizing/live_interval_test.cc create mode 100644 compiler/optimizing/live_ranges_test.cc create mode 100644 
compiler/optimizing/liveness_test.cc create mode 100644 compiler/optimizing/load_store_analysis.cc create mode 100644 compiler/optimizing/load_store_analysis.h create mode 100644 compiler/optimizing/load_store_analysis_test.cc create mode 100644 compiler/optimizing/load_store_elimination.cc create mode 100644 compiler/optimizing/load_store_elimination.h create mode 100644 compiler/optimizing/load_store_elimination_test.cc create mode 100644 compiler/optimizing/locations.cc create mode 100644 compiler/optimizing/locations.h create mode 100644 compiler/optimizing/loop_analysis.cc create mode 100644 compiler/optimizing/loop_analysis.h create mode 100644 compiler/optimizing/loop_optimization.cc create mode 100644 compiler/optimizing/loop_optimization.h create mode 100644 compiler/optimizing/loop_optimization_test.cc create mode 100644 compiler/optimizing/nodes.cc create mode 100644 compiler/optimizing/nodes.h create mode 100644 compiler/optimizing/nodes_shared.cc create mode 100644 compiler/optimizing/nodes_shared.h create mode 100644 compiler/optimizing/nodes_test.cc create mode 100644 compiler/optimizing/nodes_vector.h create mode 100644 compiler/optimizing/nodes_vector_test.cc create mode 100644 compiler/optimizing/nodes_x86.h create mode 100644 compiler/optimizing/optimization.cc create mode 100644 compiler/optimizing/optimization.h create mode 100644 compiler/optimizing/optimizing_cfi_test.cc create mode 100644 compiler/optimizing/optimizing_cfi_test_expected.inc create mode 100644 compiler/optimizing/optimizing_compiler.cc create mode 100644 compiler/optimizing/optimizing_compiler.h create mode 100644 compiler/optimizing/optimizing_compiler_stats.h create mode 100644 compiler/optimizing/optimizing_unit_test.h create mode 100644 compiler/optimizing/parallel_move_resolver.cc create mode 100644 compiler/optimizing/parallel_move_resolver.h create mode 100644 compiler/optimizing/parallel_move_test.cc create mode 100644 compiler/optimizing/pc_relative_fixups_x86.cc 
create mode 100644 compiler/optimizing/pc_relative_fixups_x86.h create mode 100644 compiler/optimizing/prepare_for_register_allocation.cc create mode 100644 compiler/optimizing/prepare_for_register_allocation.h create mode 100644 compiler/optimizing/pretty_printer.h create mode 100644 compiler/optimizing/pretty_printer_test.cc create mode 100644 compiler/optimizing/reference_type_propagation.cc create mode 100644 compiler/optimizing/reference_type_propagation.h create mode 100644 compiler/optimizing/reference_type_propagation_test.cc create mode 100644 compiler/optimizing/register_allocation_resolver.cc create mode 100644 compiler/optimizing/register_allocation_resolver.h create mode 100644 compiler/optimizing/register_allocator.cc create mode 100644 compiler/optimizing/register_allocator.h create mode 100644 compiler/optimizing/register_allocator_graph_color.cc create mode 100644 compiler/optimizing/register_allocator_graph_color.h create mode 100644 compiler/optimizing/register_allocator_linear_scan.cc create mode 100644 compiler/optimizing/register_allocator_linear_scan.h create mode 100644 compiler/optimizing/register_allocator_test.cc create mode 100644 compiler/optimizing/scheduler.cc create mode 100644 compiler/optimizing/scheduler.h create mode 100644 compiler/optimizing/scheduler_arm.cc create mode 100644 compiler/optimizing/scheduler_arm.h create mode 100644 compiler/optimizing/scheduler_arm64.cc create mode 100644 compiler/optimizing/scheduler_arm64.h create mode 100644 compiler/optimizing/scheduler_test.cc create mode 100644 compiler/optimizing/select_generator.cc create mode 100644 compiler/optimizing/select_generator.h create mode 100644 compiler/optimizing/select_generator_test.cc create mode 100644 compiler/optimizing/sharpening.cc create mode 100644 compiler/optimizing/sharpening.h create mode 100644 compiler/optimizing/side_effects_analysis.cc create mode 100644 compiler/optimizing/side_effects_analysis.h create mode 100644 
compiler/optimizing/side_effects_test.cc create mode 100644 compiler/optimizing/ssa_builder.cc create mode 100644 compiler/optimizing/ssa_builder.h create mode 100644 compiler/optimizing/ssa_liveness_analysis.cc create mode 100644 compiler/optimizing/ssa_liveness_analysis.h create mode 100644 compiler/optimizing/ssa_liveness_analysis_test.cc create mode 100644 compiler/optimizing/ssa_phi_elimination.cc create mode 100644 compiler/optimizing/ssa_phi_elimination.h create mode 100644 compiler/optimizing/ssa_test.cc create mode 100644 compiler/optimizing/stack_map_stream.cc create mode 100644 compiler/optimizing/stack_map_stream.h create mode 100644 compiler/optimizing/stack_map_test.cc create mode 100644 compiler/optimizing/superblock_cloner.cc create mode 100644 compiler/optimizing/superblock_cloner.h create mode 100644 compiler/optimizing/superblock_cloner_test.cc create mode 100644 compiler/optimizing/suspend_check_test.cc create mode 100644 compiler/optimizing/x86_memory_gen.cc create mode 100644 compiler/optimizing/x86_memory_gen.h create mode 100644 compiler/trampolines/trampoline_compiler.cc create mode 100644 compiler/trampolines/trampoline_compiler.h create mode 100644 compiler/utils/arm/assembler_arm_shared.h create mode 100644 compiler/utils/arm/assembler_arm_vixl.cc create mode 100644 compiler/utils/arm/assembler_arm_vixl.h create mode 100644 compiler/utils/arm/constants_arm.cc create mode 100644 compiler/utils/arm/constants_arm.h create mode 100644 compiler/utils/arm/jni_macro_assembler_arm_vixl.cc create mode 100644 compiler/utils/arm/jni_macro_assembler_arm_vixl.h create mode 100644 compiler/utils/arm/managed_register_arm.cc create mode 100644 compiler/utils/arm/managed_register_arm.h create mode 100644 compiler/utils/arm/managed_register_arm_test.cc create mode 100644 compiler/utils/arm64/assembler_arm64.cc create mode 100644 compiler/utils/arm64/assembler_arm64.h create mode 100644 compiler/utils/arm64/jni_macro_assembler_arm64.cc create mode 100644 
compiler/utils/arm64/jni_macro_assembler_arm64.h create mode 100644 compiler/utils/arm64/managed_register_arm64.cc create mode 100644 compiler/utils/arm64/managed_register_arm64.h create mode 100644 compiler/utils/arm64/managed_register_arm64_test.cc create mode 100644 compiler/utils/assembler.cc create mode 100644 compiler/utils/assembler.h create mode 100644 compiler/utils/assembler_test.h create mode 100644 compiler/utils/assembler_test_base.h create mode 100644 compiler/utils/assembler_thumb_test.cc create mode 100644 compiler/utils/assembler_thumb_test_expected.cc.inc create mode 100644 compiler/utils/atomic_dex_ref_map-inl.h create mode 100644 compiler/utils/atomic_dex_ref_map.h create mode 100644 compiler/utils/atomic_dex_ref_map_test.cc create mode 100644 compiler/utils/dedupe_set-inl.h create mode 100644 compiler/utils/dedupe_set.h create mode 100644 compiler/utils/dedupe_set_test.cc create mode 100644 compiler/utils/jni_macro_assembler.cc create mode 100644 compiler/utils/jni_macro_assembler.h create mode 100644 compiler/utils/jni_macro_assembler_test.h create mode 100644 compiler/utils/label.h create mode 100644 compiler/utils/managed_register.h create mode 100644 compiler/utils/stack_checks.h create mode 100644 compiler/utils/swap_space.cc create mode 100644 compiler/utils/swap_space.h create mode 100644 compiler/utils/swap_space_test.cc create mode 100644 compiler/utils/x86/assembler_x86.cc create mode 100644 compiler/utils/x86/assembler_x86.h create mode 100644 compiler/utils/x86/assembler_x86_test.cc create mode 100644 compiler/utils/x86/constants_x86.h create mode 100644 compiler/utils/x86/jni_macro_assembler_x86.cc create mode 100644 compiler/utils/x86/jni_macro_assembler_x86.h create mode 100644 compiler/utils/x86/managed_register_x86.cc create mode 100644 compiler/utils/x86/managed_register_x86.h create mode 100644 compiler/utils/x86/managed_register_x86_test.cc create mode 100644 compiler/utils/x86_64/assembler_x86_64.cc create mode 100644 
compiler/utils/x86_64/assembler_x86_64.h create mode 100644 compiler/utils/x86_64/assembler_x86_64_test.cc create mode 100644 compiler/utils/x86_64/constants_x86_64.h create mode 100644 compiler/utils/x86_64/jni_macro_assembler_x86_64.cc create mode 100644 compiler/utils/x86_64/jni_macro_assembler_x86_64.h create mode 100644 compiler/utils/x86_64/managed_register_x86_64.cc create mode 100644 compiler/utils/x86_64/managed_register_x86_64.h create mode 100644 compiler/utils/x86_64/managed_register_x86_64_test.cc create mode 100644 dalvikvm/Android.bp create mode 100644 dalvikvm/dalvikvm.cc create mode 100644 dex2oat/Android.bp create mode 100644 dex2oat/common_compiler_driver_test.cc create mode 100644 dex2oat/common_compiler_driver_test.h create mode 100644 dex2oat/dex/dex_to_dex_compiler.cc create mode 100644 dex2oat/dex/dex_to_dex_compiler.h create mode 100644 dex2oat/dex/dex_to_dex_decompiler_test.cc create mode 100644 dex2oat/dex/quick_compiler_callbacks.cc create mode 100644 dex2oat/dex/quick_compiler_callbacks.h create mode 100644 dex2oat/dex2oat.cc create mode 100644 dex2oat/dex2oat_image_test.cc create mode 100644 dex2oat/dex2oat_options.cc create mode 100644 dex2oat/dex2oat_options.def create mode 100644 dex2oat/dex2oat_options.h create mode 100644 dex2oat/dex2oat_test.cc create mode 100644 dex2oat/dex2oat_vdex_test.cc create mode 100644 dex2oat/driver/compiler_driver-inl.h create mode 100644 dex2oat/driver/compiler_driver.cc create mode 100644 dex2oat/driver/compiler_driver.h create mode 100644 dex2oat/driver/compiler_driver_test.cc create mode 100644 dex2oat/include/dex2oat_return_codes.h create mode 100644 dex2oat/linker/arm/relative_patcher_arm_base.cc create mode 100644 dex2oat/linker/arm/relative_patcher_arm_base.h create mode 100644 dex2oat/linker/arm/relative_patcher_thumb2.cc create mode 100644 dex2oat/linker/arm/relative_patcher_thumb2.h create mode 100644 dex2oat/linker/arm/relative_patcher_thumb2_test.cc create mode 100644 
dex2oat/linker/arm64/relative_patcher_arm64.cc create mode 100644 dex2oat/linker/arm64/relative_patcher_arm64.h create mode 100644 dex2oat/linker/arm64/relative_patcher_arm64_test.cc create mode 100644 dex2oat/linker/elf_writer.cc create mode 100644 dex2oat/linker/elf_writer.h create mode 100644 dex2oat/linker/elf_writer_quick.cc create mode 100644 dex2oat/linker/elf_writer_quick.h create mode 100644 dex2oat/linker/elf_writer_test.cc create mode 100644 dex2oat/linker/image_test.cc create mode 100644 dex2oat/linker/image_test.h create mode 100644 dex2oat/linker/image_write_read_test.cc create mode 100644 dex2oat/linker/image_writer.cc create mode 100644 dex2oat/linker/image_writer.h create mode 100644 dex2oat/linker/index_bss_mapping_encoder.h create mode 100644 dex2oat/linker/index_bss_mapping_encoder_test.cc create mode 100644 dex2oat/linker/multi_oat_relative_patcher.cc create mode 100644 dex2oat/linker/multi_oat_relative_patcher.h create mode 100644 dex2oat/linker/multi_oat_relative_patcher_test.cc create mode 100644 dex2oat/linker/oat_writer.cc create mode 100644 dex2oat/linker/oat_writer.h create mode 100644 dex2oat/linker/oat_writer_test.cc create mode 100644 dex2oat/linker/relative_patcher.cc create mode 100644 dex2oat/linker/relative_patcher.h create mode 100644 dex2oat/linker/relative_patcher_test.h create mode 100644 dex2oat/linker/x86/relative_patcher_x86.cc create mode 100644 dex2oat/linker/x86/relative_patcher_x86.h create mode 100644 dex2oat/linker/x86/relative_patcher_x86_base.cc create mode 100644 dex2oat/linker/x86/relative_patcher_x86_base.h create mode 100644 dex2oat/linker/x86/relative_patcher_x86_test.cc create mode 100644 dex2oat/linker/x86_64/relative_patcher_x86_64.cc create mode 100644 dex2oat/linker/x86_64/relative_patcher_x86_64.h create mode 100644 dex2oat/linker/x86_64/relative_patcher_x86_64_test.cc create mode 100644 dex2oat/verifier_deps_test.cc create mode 100644 dexdump/Android.bp create mode 100644 dexdump/dexdump.cc create mode 
100644 dexdump/dexdump.h create mode 100644 dexdump/dexdump_cfg.cc create mode 100644 dexdump/dexdump_cfg.h create mode 100644 dexdump/dexdump_main.cc create mode 100644 dexdump/dexdump_test.cc create mode 100644 dexlayout/Android.bp create mode 100644 dexlayout/compact_dex_writer.cc create mode 100644 dexlayout/compact_dex_writer.h create mode 100644 dexlayout/dex_container.h create mode 100644 dexlayout/dex_ir.cc create mode 100644 dexlayout/dex_ir.h create mode 100644 dexlayout/dex_ir_builder.cc create mode 100644 dexlayout/dex_ir_builder.h create mode 100644 dexlayout/dex_verify.cc create mode 100644 dexlayout/dex_verify.h create mode 100644 dexlayout/dex_visualize.cc create mode 100644 dexlayout/dex_visualize.h create mode 100644 dexlayout/dex_writer.cc create mode 100644 dexlayout/dex_writer.h create mode 100644 dexlayout/dexdiag.cc create mode 100644 dexlayout/dexdiag_test.cc create mode 100644 dexlayout/dexlayout.cc create mode 100644 dexlayout/dexlayout.h create mode 100644 dexlayout/dexlayout_main.cc create mode 100644 dexlayout/dexlayout_test.cc create mode 100644 dexlist/Android.bp create mode 100644 dexlist/dexlist.cc create mode 100644 dexlist/dexlist_test.cc create mode 100644 dexoptanalyzer/Android.bp create mode 100644 dexoptanalyzer/dexoptanalyzer.cc create mode 100644 dexoptanalyzer/dexoptanalyzer_test.cc create mode 100644 disassembler/Android.bp create mode 100644 disassembler/disassembler.cc create mode 100644 disassembler/disassembler.h create mode 100644 disassembler/disassembler_arm.cc create mode 100644 disassembler/disassembler_arm.h create mode 100644 disassembler/disassembler_arm64.cc create mode 100644 disassembler/disassembler_arm64.h create mode 100644 disassembler/disassembler_x86.cc create mode 100644 disassembler/disassembler_x86.h create mode 100644 dt_fd_forward/Android.bp create mode 100644 dt_fd_forward/MODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION create mode 100644 dt_fd_forward/NOTICE create mode 100644 
dt_fd_forward/README.md create mode 100644 dt_fd_forward/dt_fd_forward.cc create mode 100644 dt_fd_forward/dt_fd_forward.h create mode 100644 dt_fd_forward/export/Android.bp create mode 100644 dt_fd_forward/export/MODULE_LICENSE_APACHE2 create mode 100644 dt_fd_forward/export/fd_transport.h create mode 100644 imgdiag/Android.bp create mode 100644 imgdiag/imgdiag.cc create mode 100644 imgdiag/imgdiag_test.cc create mode 100644 libartbase/Android.bp create mode 100644 libartbase/arch/instruction_set.cc create mode 100644 libartbase/arch/instruction_set.h create mode 100644 libartbase/arch/instruction_set_test.cc create mode 100644 libartbase/base/aborting.h create mode 100644 libartbase/base/allocator.cc create mode 100644 libartbase/base/allocator.h create mode 100644 libartbase/base/arena_allocator-inl.h create mode 100644 libartbase/base/arena_allocator.cc create mode 100644 libartbase/base/arena_allocator.h create mode 100644 libartbase/base/arena_allocator_test.cc create mode 100644 libartbase/base/arena_bit_vector.cc create mode 100644 libartbase/base/arena_bit_vector.h create mode 100644 libartbase/base/arena_containers.h create mode 100644 libartbase/base/arena_object.h create mode 100644 libartbase/base/array_ref.h create mode 100644 libartbase/base/array_slice.h create mode 100644 libartbase/base/atomic.h create mode 100644 libartbase/base/bit_field.h create mode 100644 libartbase/base/bit_field_test.cc create mode 100644 libartbase/base/bit_memory_region.h create mode 100644 libartbase/base/bit_memory_region_test.cc create mode 100644 libartbase/base/bit_string.h create mode 100644 libartbase/base/bit_string_test.cc create mode 100644 libartbase/base/bit_struct.h create mode 100644 libartbase/base/bit_struct_detail.h create mode 100644 libartbase/base/bit_struct_test.cc create mode 100644 libartbase/base/bit_table.h create mode 100644 libartbase/base/bit_table_test.cc create mode 100644 libartbase/base/bit_utils.h create mode 100644 
libartbase/base/bit_utils_iterator.h create mode 100644 libartbase/base/bit_utils_test.cc create mode 100644 libartbase/base/bit_vector-inl.h create mode 100644 libartbase/base/bit_vector.cc create mode 100644 libartbase/base/bit_vector.h create mode 100644 libartbase/base/bit_vector_test.cc create mode 100644 libartbase/base/bounded_fifo.h create mode 100644 libartbase/base/casts.h create mode 100644 libartbase/base/common_art_test.cc create mode 100644 libartbase/base/common_art_test.h create mode 100644 libartbase/base/data_hash.h create mode 100644 libartbase/base/dchecked_vector.h create mode 100644 libartbase/base/debug_stack.h create mode 100644 libartbase/base/dumpable.h create mode 100644 libartbase/base/endian_utils.h create mode 100644 libartbase/base/enums.cc create mode 100644 libartbase/base/enums.h create mode 100644 libartbase/base/file_magic.cc create mode 100644 libartbase/base/file_magic.h create mode 100644 libartbase/base/file_utils.cc create mode 100644 libartbase/base/file_utils.h create mode 100644 libartbase/base/file_utils_test.cc create mode 100644 libartbase/base/globals.h create mode 100644 libartbase/base/globals_unix.cc create mode 100644 libartbase/base/hash_map.h create mode 100644 libartbase/base/hash_set.h create mode 100644 libartbase/base/hash_set_test.cc create mode 100644 libartbase/base/hex_dump.cc create mode 100644 libartbase/base/hex_dump.h create mode 100644 libartbase/base/hex_dump_test.cc create mode 100644 libartbase/base/hiddenapi_domain.h create mode 100644 libartbase/base/hiddenapi_flags.cc create mode 100644 libartbase/base/hiddenapi_flags.h create mode 100644 libartbase/base/hiddenapi_stubs.h create mode 100644 libartbase/base/histogram-inl.h create mode 100644 libartbase/base/histogram.h create mode 100644 libartbase/base/histogram_test.cc create mode 100644 libartbase/base/indenter.h create mode 100644 libartbase/base/indenter_test.cc create mode 100644 libartbase/base/intrusive_forward_list.h create mode 100644 
libartbase/base/intrusive_forward_list_test.cc create mode 100644 libartbase/base/iteration_range.h create mode 100644 libartbase/base/leb128.h create mode 100644 libartbase/base/leb128_test.cc create mode 100644 libartbase/base/length_prefixed_array.h create mode 100644 libartbase/base/logging.cc create mode 100644 libartbase/base/logging.h create mode 100644 libartbase/base/logging_test.cc create mode 100644 libartbase/base/macros.h create mode 100644 libartbase/base/malloc_arena_pool.cc create mode 100644 libartbase/base/malloc_arena_pool.h create mode 100644 libartbase/base/mem_map.cc create mode 100644 libartbase/base/mem_map.h create mode 100644 libartbase/base/mem_map_fuchsia.cc create mode 100644 libartbase/base/mem_map_test.cc create mode 100644 libartbase/base/mem_map_unix.cc create mode 100644 libartbase/base/mem_map_windows.cc create mode 100644 libartbase/base/membarrier.cc create mode 100644 libartbase/base/membarrier.h create mode 100644 libartbase/base/membarrier_test.cc create mode 100644 libartbase/base/memfd.cc create mode 100644 libartbase/base/memfd.h create mode 100644 libartbase/base/memfd_test.cc create mode 100644 libartbase/base/memory_region.cc create mode 100644 libartbase/base/memory_region.h create mode 100644 libartbase/base/memory_region_test.cc create mode 100644 libartbase/base/memory_tool.h create mode 100644 libartbase/base/mman.h create mode 100644 libartbase/base/os.h create mode 100644 libartbase/base/os_linux.cc create mode 100644 libartbase/base/runtime_debug.cc create mode 100644 libartbase/base/runtime_debug.h create mode 100644 libartbase/base/safe_copy.cc create mode 100644 libartbase/base/safe_copy.h create mode 100644 libartbase/base/safe_copy_test.cc create mode 100644 libartbase/base/safe_map.h create mode 100644 libartbase/base/scoped_arena_allocator.cc create mode 100644 libartbase/base/scoped_arena_allocator.h create mode 100644 libartbase/base/scoped_arena_containers.h create mode 100644 
libartbase/base/scoped_flock.cc create mode 100644 libartbase/base/scoped_flock.h create mode 100644 libartbase/base/scoped_flock_test.cc create mode 100644 libartbase/base/sdk_version.h create mode 100644 libartbase/base/socket_peer_is_trusted.cc create mode 100644 libartbase/base/socket_peer_is_trusted.h create mode 100644 libartbase/base/stats.h create mode 100644 libartbase/base/stl_util.h create mode 100644 libartbase/base/stl_util_identity.h create mode 100644 libartbase/base/stride_iterator.h create mode 100644 libartbase/base/string_view_cpp20.h create mode 100644 libartbase/base/strlcpy.h create mode 100644 libartbase/base/systrace.h create mode 100644 libartbase/base/time_utils.cc create mode 100644 libartbase/base/time_utils.h create mode 100644 libartbase/base/time_utils_test.cc create mode 100644 libartbase/base/to_str.h create mode 100644 libartbase/base/tracking_safe_map.h create mode 100644 libartbase/base/transform_array_ref.h create mode 100644 libartbase/base/transform_array_ref_test.cc create mode 100644 libartbase/base/transform_iterator.h create mode 100644 libartbase/base/transform_iterator_test.cc create mode 100644 libartbase/base/unix_file/README create mode 100644 libartbase/base/unix_file/fd_file.cc create mode 100644 libartbase/base/unix_file/fd_file.h create mode 100644 libartbase/base/unix_file/fd_file_test.cc create mode 100644 libartbase/base/unix_file/random_access_file.h create mode 100644 libartbase/base/unix_file/random_access_file_test.h create mode 100644 libartbase/base/unix_file/random_access_file_utils.cc create mode 100644 libartbase/base/unix_file/random_access_file_utils.h create mode 100644 libartbase/base/utils.cc create mode 100644 libartbase/base/utils.h create mode 100644 libartbase/base/utils_test.cc create mode 100644 libartbase/base/value_object.h create mode 100644 libartbase/base/variant_map.h create mode 100644 libartbase/base/variant_map_test.cc create mode 100644 libartbase/base/zip_archive.cc create mode 
100644 libartbase/base/zip_archive.h create mode 100644 libartbase/base/zip_archive_test.cc create mode 100644 libartbase/libartbase.map create mode 100644 libartimagevalues/Android.bp create mode 100644 libartimagevalues/art_image_values.cpp create mode 100644 libartimagevalues/art_image_values.h create mode 100644 libartpalette/Android.bp create mode 100644 libartpalette/apex/palette.cc create mode 100644 libartpalette/apex/palette_test.cc create mode 100644 libartpalette/include/palette/palette.h create mode 100644 libartpalette/include/palette/palette_method_list.h create mode 100644 libartpalette/include/palette/palette_types.h create mode 100644 libartpalette/libartpalette.map.txt create mode 100644 libartpalette/system/palette_fake.cc create mode 100644 libartpalette/system/palette_system.h create mode 100644 libdexfile/Android.bp create mode 100644 libdexfile/dex/art_dex_file_loader.cc create mode 100644 libdexfile/dex/art_dex_file_loader.h create mode 100644 libdexfile/dex/art_dex_file_loader_test.cc create mode 100644 libdexfile/dex/base64_test_util.h create mode 100644 libdexfile/dex/bytecode_utils.h create mode 100644 libdexfile/dex/class_accessor-inl.h create mode 100644 libdexfile/dex/class_accessor.h create mode 100644 libdexfile/dex/class_accessor_test.cc create mode 100644 libdexfile/dex/class_iterator.h create mode 100644 libdexfile/dex/class_reference.h create mode 100644 libdexfile/dex/code_item_accessors-inl.h create mode 100644 libdexfile/dex/code_item_accessors.h create mode 100644 libdexfile/dex/code_item_accessors_test.cc create mode 100644 libdexfile/dex/compact_dex_file.cc create mode 100644 libdexfile/dex/compact_dex_file.h create mode 100644 libdexfile/dex/compact_dex_file_test.cc create mode 100644 libdexfile/dex/compact_dex_level.h create mode 100644 libdexfile/dex/compact_dex_utils.h create mode 100644 libdexfile/dex/compact_offset_table.cc create mode 100644 libdexfile/dex/compact_offset_table.h create mode 100644 
libdexfile/dex/compact_offset_table_test.cc create mode 100644 libdexfile/dex/descriptors_names.cc create mode 100644 libdexfile/dex/descriptors_names.h create mode 100644 libdexfile/dex/descriptors_names_test.cc create mode 100644 libdexfile/dex/dex_file-inl.h create mode 100644 libdexfile/dex/dex_file.cc create mode 100644 libdexfile/dex/dex_file.h create mode 100644 libdexfile/dex/dex_file_exception_helpers.cc create mode 100644 libdexfile/dex/dex_file_exception_helpers.h create mode 100644 libdexfile/dex/dex_file_layout.cc create mode 100644 libdexfile/dex/dex_file_layout.h create mode 100644 libdexfile/dex/dex_file_loader.cc create mode 100644 libdexfile/dex/dex_file_loader.h create mode 100644 libdexfile/dex/dex_file_loader_test.cc create mode 100644 libdexfile/dex/dex_file_reference.h create mode 100644 libdexfile/dex/dex_file_structs.h create mode 100644 libdexfile/dex/dex_file_tracking_registrar.cc create mode 100644 libdexfile/dex/dex_file_tracking_registrar.h create mode 100644 libdexfile/dex/dex_file_types.h create mode 100644 libdexfile/dex/dex_file_verifier.cc create mode 100644 libdexfile/dex/dex_file_verifier.h create mode 100644 libdexfile/dex/dex_file_verifier_test.cc create mode 100644 libdexfile/dex/dex_instruction-inl.h create mode 100644 libdexfile/dex/dex_instruction.cc create mode 100644 libdexfile/dex/dex_instruction.h create mode 100644 libdexfile/dex/dex_instruction_iterator.h create mode 100644 libdexfile/dex/dex_instruction_list.h create mode 100644 libdexfile/dex/dex_instruction_test.cc create mode 100644 libdexfile/dex/dex_instruction_utils.h create mode 100644 libdexfile/dex/invoke_type.h create mode 100644 libdexfile/dex/method_reference.h create mode 100644 libdexfile/dex/modifiers.cc create mode 100644 libdexfile/dex/modifiers.h create mode 100644 libdexfile/dex/primitive.cc create mode 100644 libdexfile/dex/primitive.h create mode 100644 libdexfile/dex/primitive_test.cc create mode 100644 libdexfile/dex/signature-inl.h create 
mode 100644 libdexfile/dex/signature.cc create mode 100644 libdexfile/dex/signature.h create mode 100644 libdexfile/dex/standard_dex_file.cc create mode 100644 libdexfile/dex/standard_dex_file.h create mode 100644 libdexfile/dex/string_reference.h create mode 100644 libdexfile/dex/string_reference_test.cc create mode 100644 libdexfile/dex/test_dex_file_builder.h create mode 100644 libdexfile/dex/test_dex_file_builder_test.cc create mode 100644 libdexfile/dex/type_lookup_table.cc create mode 100644 libdexfile/dex/type_lookup_table.h create mode 100644 libdexfile/dex/type_lookup_table_test.cc create mode 100644 libdexfile/dex/type_reference.h create mode 100644 libdexfile/dex/utf-inl.h create mode 100644 libdexfile/dex/utf.cc create mode 100644 libdexfile/dex/utf.h create mode 100644 libdexfile/dex/utf_test.cc create mode 100644 libdexfile/external/dex_file_ext.cc create mode 100644 libdexfile/external/dex_file_ext_c_test.c create mode 100644 libdexfile/external/dex_file_supp.cc create mode 100644 libdexfile/external/dex_file_supp_test.cc create mode 100644 libdexfile/external/include/art_api/dex_file_external.h create mode 100644 libdexfile/external/include/art_api/dex_file_support.h create mode 100644 libdexfile/external/libdexfile_external.map.txt create mode 100644 libelffile/Android.bp create mode 100644 libelffile/dwarf/debug_abbrev_writer.h create mode 100644 libelffile/dwarf/debug_frame_opcode_writer.h create mode 100644 libelffile/dwarf/debug_info_entry_writer.h create mode 100644 libelffile/dwarf/debug_line_opcode_writer.h create mode 100644 libelffile/dwarf/dwarf_constants.h create mode 100644 libelffile/dwarf/expression.h create mode 100644 libelffile/dwarf/headers.h create mode 100644 libelffile/dwarf/register.h create mode 100644 libelffile/dwarf/writer.h create mode 100644 libelffile/elf/elf_builder.h create mode 100644 libelffile/elf/elf_debug_reader.h create mode 100644 libelffile/elf/elf_utils.h create mode 100644 libelffile/elf/xz_utils.cc create 
mode 100644 libelffile/elf/xz_utils.h create mode 100644 libelffile/stream/buffered_output_stream.cc create mode 100644 libelffile/stream/buffered_output_stream.h create mode 100644 libelffile/stream/error_delaying_output_stream.h create mode 100644 libelffile/stream/file_output_stream.cc create mode 100644 libelffile/stream/file_output_stream.h create mode 100644 libelffile/stream/output_stream.cc create mode 100644 libelffile/stream/output_stream.h create mode 100644 libelffile/stream/vector_output_stream.cc create mode 100644 libelffile/stream/vector_output_stream.h create mode 120000 libnativebridge/.clang-format create mode 100644 libnativebridge/Android.bp create mode 100644 libnativebridge/OWNERS create mode 100644 libnativebridge/include/nativebridge/native_bridge.h create mode 100644 libnativebridge/libnativebridge.map.txt create mode 100644 libnativebridge/native_bridge.cc create mode 100644 libnativebridge/native_bridge_lazy.cc create mode 100644 libnativebridge/tests/Android.bp create mode 100644 libnativebridge/tests/CodeCacheCreate_test.cpp create mode 100644 libnativebridge/tests/CodeCacheExists_test.cpp create mode 100644 libnativebridge/tests/CodeCacheStatFail_test.cpp create mode 100644 libnativebridge/tests/CompleteFlow_test.cpp create mode 100644 libnativebridge/tests/DummyNativeBridge.cpp create mode 100644 libnativebridge/tests/DummyNativeBridge2.cpp create mode 100644 libnativebridge/tests/DummyNativeBridge3.cpp create mode 100644 libnativebridge/tests/DummyNativeBridge6.cpp create mode 100644 libnativebridge/tests/InvalidCharsNativeBridge_test.cpp create mode 100644 libnativebridge/tests/NativeBridge2Signal_test.cpp create mode 100644 libnativebridge/tests/NativeBridge3CreateNamespace_test.cpp create mode 100644 libnativebridge/tests/NativeBridge3GetError_test.cpp create mode 100644 libnativebridge/tests/NativeBridge3InitAnonymousNamespace_test.cpp create mode 100644 libnativebridge/tests/NativeBridge3IsPathSupported_test.cpp create mode 
100644 libnativebridge/tests/NativeBridge3LoadLibraryExt_test.cpp create mode 100644 libnativebridge/tests/NativeBridge3UnloadLibrary_test.cpp create mode 100644 libnativebridge/tests/NativeBridge6PreZygoteFork_lib.cpp create mode 100644 libnativebridge/tests/NativeBridge6PreZygoteFork_lib.h create mode 100644 libnativebridge/tests/NativeBridge6PreZygoteFork_test.cpp create mode 100644 libnativebridge/tests/NativeBridgeApi.c create mode 100644 libnativebridge/tests/NativeBridgeTest.h create mode 100644 libnativebridge/tests/NativeBridgeVersion_test.cpp create mode 100644 libnativebridge/tests/NeedsNativeBridge_test.cpp create mode 100644 libnativebridge/tests/PreInitializeNativeBridgeFail1_test.cpp create mode 100644 libnativebridge/tests/PreInitializeNativeBridgeFail2_test.cpp create mode 100644 libnativebridge/tests/PreInitializeNativeBridge_test.cpp create mode 100644 libnativebridge/tests/ReSetupNativeBridge_test.cpp create mode 100644 libnativebridge/tests/UnavailableNativeBridge_test.cpp create mode 100644 libnativebridge/tests/ValidNameNativeBridge_test.cpp create mode 120000 libnativeloader/.clang-format create mode 100644 libnativeloader/Android.bp create mode 100644 libnativeloader/OWNERS create mode 100644 libnativeloader/README.md create mode 100644 libnativeloader/TEST_MAPPING create mode 100644 libnativeloader/include/nativeloader/dlext_namespaces.h create mode 100644 libnativeloader/include/nativeloader/native_loader.h create mode 100644 libnativeloader/libnativeloader.map.txt create mode 100644 libnativeloader/library_namespaces.cpp create mode 100644 libnativeloader/library_namespaces.h create mode 100644 libnativeloader/native_loader.cpp create mode 100644 libnativeloader/native_loader_lazy.cpp create mode 100644 libnativeloader/native_loader_namespace.cpp create mode 100644 libnativeloader/native_loader_namespace.h create mode 100644 libnativeloader/native_loader_test.cpp create mode 100644 libnativeloader/public_libraries.cpp create mode 100644 
libnativeloader/public_libraries.h create mode 100644 libnativeloader/test/Android.bp create mode 100644 libnativeloader/test/Android.mk create mode 100644 libnativeloader/test/api_test.c create mode 100644 libnativeloader/test/public.libraries-oem1.txt create mode 100644 libnativeloader/test/public.libraries-oem2.txt create mode 100644 libnativeloader/test/public.libraries-product1.txt create mode 100755 libnativeloader/test/runtest.sh create mode 100644 libnativeloader/test/src/android/test/app/TestActivity.java create mode 100644 libnativeloader/test/system/AndroidManifest.xml create mode 100644 libnativeloader/test/test.cpp create mode 100644 libnativeloader/test/vendor/AndroidManifest.xml create mode 100644 libnativeloader/utils.h create mode 100644 libprofile/Android.bp create mode 100644 libprofile/profile/profile_boot_info.cc create mode 100644 libprofile/profile/profile_boot_info.h create mode 100644 libprofile/profile/profile_boot_info_test.cc create mode 100644 libprofile/profile/profile_compilation_info.cc create mode 100644 libprofile/profile/profile_compilation_info.h create mode 100644 libprofile/profile/profile_compilation_info_test.cc create mode 100644 libprofile/profile/profile_helpers.h create mode 100644 oatdump/Android.bp create mode 100644 oatdump/Android.mk create mode 100644 oatdump/oatdump.cc create mode 100644 oatdump/oatdump_app_test.cc create mode 100644 oatdump/oatdump_image_test.cc create mode 100644 oatdump/oatdump_test.cc create mode 100644 oatdump/oatdump_test.h create mode 100644 openjdkjvm/Android.bp create mode 100644 openjdkjvm/MODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION create mode 100644 openjdkjvm/NOTICE create mode 100644 openjdkjvm/OpenjdkJvm.cc create mode 100644 openjdkjvmti/Android.bp create mode 100644 openjdkjvmti/MODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION create mode 100644 openjdkjvmti/NOTICE create mode 100644 openjdkjvmti/OpenjdkJvmTi.cc create mode 100644 openjdkjvmti/README.md create mode 100644 
openjdkjvmti/alloc_manager.cc create mode 100644 openjdkjvmti/alloc_manager.h create mode 100644 openjdkjvmti/art_jvmti.h create mode 100644 openjdkjvmti/deopt_manager.cc create mode 100644 openjdkjvmti/deopt_manager.h create mode 100644 openjdkjvmti/events-inl.h create mode 100644 openjdkjvmti/events.cc create mode 100644 openjdkjvmti/events.h create mode 100644 openjdkjvmti/fixed_up_dex_file.cc create mode 100644 openjdkjvmti/fixed_up_dex_file.h create mode 100644 openjdkjvmti/include/CPPLINT.cfg create mode 100644 openjdkjvmti/include/jvmti.h create mode 100644 openjdkjvmti/jvmti_allocator.h create mode 100644 openjdkjvmti/jvmti_weak_table-inl.h create mode 100644 openjdkjvmti/jvmti_weak_table.h create mode 100644 openjdkjvmti/object_tagging.cc create mode 100644 openjdkjvmti/object_tagging.h create mode 100644 openjdkjvmti/ti_allocator.cc create mode 100644 openjdkjvmti/ti_allocator.h create mode 100644 openjdkjvmti/ti_breakpoint.cc create mode 100644 openjdkjvmti/ti_breakpoint.h create mode 100644 openjdkjvmti/ti_class.cc create mode 100644 openjdkjvmti/ti_class.h create mode 100644 openjdkjvmti/ti_class_definition.cc create mode 100644 openjdkjvmti/ti_class_definition.h create mode 100644 openjdkjvmti/ti_class_loader-inl.h create mode 100644 openjdkjvmti/ti_class_loader.cc create mode 100644 openjdkjvmti/ti_class_loader.h create mode 100644 openjdkjvmti/ti_ddms.cc create mode 100644 openjdkjvmti/ti_ddms.h create mode 100644 openjdkjvmti/ti_dump.cc create mode 100644 openjdkjvmti/ti_dump.h create mode 100644 openjdkjvmti/ti_extension.cc create mode 100644 openjdkjvmti/ti_extension.h create mode 100644 openjdkjvmti/ti_field.cc create mode 100644 openjdkjvmti/ti_field.h create mode 100644 openjdkjvmti/ti_heap.cc create mode 100644 openjdkjvmti/ti_heap.h create mode 100644 openjdkjvmti/ti_jni.cc create mode 100644 openjdkjvmti/ti_jni.h create mode 100644 openjdkjvmti/ti_logging.cc create mode 100644 openjdkjvmti/ti_logging.h create mode 100644 
openjdkjvmti/ti_method.cc create mode 100644 openjdkjvmti/ti_method.h create mode 100644 openjdkjvmti/ti_monitor.cc create mode 100644 openjdkjvmti/ti_monitor.h create mode 100644 openjdkjvmti/ti_object.cc create mode 100644 openjdkjvmti/ti_object.h create mode 100644 openjdkjvmti/ti_phase.cc create mode 100644 openjdkjvmti/ti_phase.h create mode 100644 openjdkjvmti/ti_properties.cc create mode 100644 openjdkjvmti/ti_properties.h create mode 100644 openjdkjvmti/ti_redefine.cc create mode 100644 openjdkjvmti/ti_redefine.h create mode 100644 openjdkjvmti/ti_search.cc create mode 100644 openjdkjvmti/ti_search.h create mode 100644 openjdkjvmti/ti_stack.cc create mode 100644 openjdkjvmti/ti_stack.h create mode 100644 openjdkjvmti/ti_thread.cc create mode 100644 openjdkjvmti/ti_thread.h create mode 100644 openjdkjvmti/ti_threadgroup.cc create mode 100644 openjdkjvmti/ti_threadgroup.h create mode 100644 openjdkjvmti/ti_timers.cc create mode 100644 openjdkjvmti/ti_timers.h create mode 100644 openjdkjvmti/transform.cc create mode 100644 openjdkjvmti/transform.h create mode 100644 perfetto_hprof/Android.bp create mode 100644 perfetto_hprof/perfetto_hprof.cc create mode 100644 perfetto_hprof/perfetto_hprof.h create mode 100644 profman/Android.bp create mode 100644 profman/boot_image_profile.cc create mode 100644 profman/boot_image_profile.h create mode 100644 profman/profile_assistant.cc create mode 100644 profman/profile_assistant.h create mode 100644 profman/profile_assistant_test.cc create mode 100644 profman/profman.cc create mode 100644 runtime/Android.bp create mode 100644 runtime/aot_class_linker.cc create mode 100644 runtime/aot_class_linker.h create mode 100644 runtime/arch/arch_test.cc create mode 100644 runtime/arch/arm/asm_support_arm.S create mode 100644 runtime/arch/arm/asm_support_arm.h create mode 100644 runtime/arch/arm/callee_save_frame_arm.h create mode 100644 runtime/arch/arm/context_arm.cc create mode 100644 runtime/arch/arm/context_arm.h create mode 
100644 runtime/arch/arm/entrypoints_init_arm.cc create mode 100644 runtime/arch/arm/fault_handler_arm.cc create mode 100644 runtime/arch/arm/instruction_set_features_arm.cc create mode 100644 runtime/arch/arm/instruction_set_features_arm.h create mode 100644 runtime/arch/arm/instruction_set_features_arm_test.cc create mode 100644 runtime/arch/arm/instruction_set_features_assembly_tests.S create mode 100644 runtime/arch/arm/jni_entrypoints_arm.S create mode 100644 runtime/arch/arm/jni_frame_arm.h create mode 100644 runtime/arch/arm/memcmp16_arm.S create mode 100644 runtime/arch/arm/quick_entrypoints_arm.S create mode 100644 runtime/arch/arm/quick_entrypoints_cc_arm.cc create mode 100644 runtime/arch/arm/registers_arm.cc create mode 100644 runtime/arch/arm/registers_arm.h create mode 100644 runtime/arch/arm/thread_arm.cc create mode 100644 runtime/arch/arm64/asm_support_arm64.S create mode 100644 runtime/arch/arm64/asm_support_arm64.h create mode 100644 runtime/arch/arm64/callee_save_frame_arm64.h create mode 100644 runtime/arch/arm64/context_arm64.cc create mode 100644 runtime/arch/arm64/context_arm64.h create mode 100644 runtime/arch/arm64/entrypoints_init_arm64.cc create mode 100644 runtime/arch/arm64/fault_handler_arm64.cc create mode 100644 runtime/arch/arm64/instruction_set_features_arm64.cc create mode 100644 runtime/arch/arm64/instruction_set_features_arm64.h create mode 100644 runtime/arch/arm64/instruction_set_features_arm64_test.cc create mode 100644 runtime/arch/arm64/jni_entrypoints_arm64.S create mode 100644 runtime/arch/arm64/jni_frame_arm64.h create mode 100644 runtime/arch/arm64/memcmp16_arm64.S create mode 100644 runtime/arch/arm64/quick_entrypoints_arm64.S create mode 100644 runtime/arch/arm64/registers_arm64.cc create mode 100644 runtime/arch/arm64/registers_arm64.h create mode 100644 runtime/arch/arm64/thread_arm64.cc create mode 100644 runtime/arch/context-inl.h create mode 100644 runtime/arch/context.cc create mode 100644 runtime/arch/context.h 
create mode 100644 runtime/arch/instruction_set_features.cc create mode 100644 runtime/arch/instruction_set_features.h create mode 100644 runtime/arch/instruction_set_features_test.cc create mode 100644 runtime/arch/memcmp16.cc create mode 100644 runtime/arch/memcmp16.h create mode 100644 runtime/arch/memcmp16_test.cc create mode 100644 runtime/arch/quick_alloc_entrypoints.S create mode 100644 runtime/arch/stub_test.cc create mode 100644 runtime/arch/x86/asm_support_x86.S create mode 100644 runtime/arch/x86/asm_support_x86.h create mode 100644 runtime/arch/x86/callee_save_frame_x86.h create mode 100644 runtime/arch/x86/context_x86.cc create mode 100644 runtime/arch/x86/context_x86.h create mode 100644 runtime/arch/x86/entrypoints_init_x86.cc create mode 100644 runtime/arch/x86/fault_handler_x86.cc create mode 100644 runtime/arch/x86/instruction_set_features_x86.cc create mode 100644 runtime/arch/x86/instruction_set_features_x86.h create mode 100644 runtime/arch/x86/instruction_set_features_x86_test.cc create mode 100644 runtime/arch/x86/jni_entrypoints_x86.S create mode 100644 runtime/arch/x86/jni_frame_x86.h create mode 100644 runtime/arch/x86/memcmp16_x86.S create mode 100644 runtime/arch/x86/quick_entrypoints_x86.S create mode 100644 runtime/arch/x86/registers_x86.cc create mode 100644 runtime/arch/x86/registers_x86.h create mode 100644 runtime/arch/x86/thread_x86.cc create mode 100644 runtime/arch/x86_64/asm_support_x86_64.S create mode 100644 runtime/arch/x86_64/asm_support_x86_64.h create mode 100644 runtime/arch/x86_64/callee_save_frame_x86_64.h create mode 100644 runtime/arch/x86_64/context_x86_64.cc create mode 100644 runtime/arch/x86_64/context_x86_64.h create mode 100644 runtime/arch/x86_64/entrypoints_init_x86_64.cc create mode 100644 runtime/arch/x86_64/instruction_set_features_x86_64.h create mode 100644 runtime/arch/x86_64/instruction_set_features_x86_64_test.cc create mode 100644 runtime/arch/x86_64/jni_entrypoints_x86_64.S create mode 100644 
runtime/arch/x86_64/jni_frame_x86_64.h create mode 100755 runtime/arch/x86_64/memcmp16_x86_64.S create mode 100644 runtime/arch/x86_64/quick_entrypoints_x86_64.S create mode 100644 runtime/arch/x86_64/registers_x86_64.cc create mode 100644 runtime/arch/x86_64/registers_x86_64.h create mode 100644 runtime/arch/x86_64/thread_x86_64.cc create mode 100644 runtime/art_field-inl.h create mode 100644 runtime/art_field.cc create mode 100644 runtime/art_field.h create mode 100644 runtime/art_method-inl.h create mode 100644 runtime/art_method.cc create mode 100644 runtime/art_method.h create mode 100644 runtime/asm_support.h create mode 100644 runtime/backtrace_helper.cc create mode 100644 runtime/backtrace_helper.h create mode 100644 runtime/barrier.cc create mode 100644 runtime/barrier.h create mode 100644 runtime/barrier_test.cc create mode 100644 runtime/base/callee_save_type.h create mode 100644 runtime/base/locks.cc create mode 100644 runtime/base/locks.h create mode 100644 runtime/base/mem_map_arena_pool.cc create mode 100644 runtime/base/mem_map_arena_pool.h create mode 100644 runtime/base/mutator_locked_dumpable.h create mode 100644 runtime/base/mutex-inl.h create mode 100644 runtime/base/mutex.cc create mode 100644 runtime/base/mutex.h create mode 100644 runtime/base/mutex_test.cc create mode 100644 runtime/base/quasi_atomic.cc create mode 100644 runtime/base/quasi_atomic.h create mode 100644 runtime/base/timing_logger.cc create mode 100644 runtime/base/timing_logger.h create mode 100644 runtime/base/timing_logger_test.cc create mode 100644 runtime/cha.cc create mode 100644 runtime/cha.h create mode 100644 runtime/cha_test.cc create mode 100644 runtime/check_reference_map_visitor.h create mode 100644 runtime/class_linker-inl.h create mode 100644 runtime/class_linker.cc create mode 100644 runtime/class_linker.h create mode 100644 runtime/class_linker_test.cc create mode 100644 runtime/class_loader_context.cc create mode 100644 runtime/class_loader_context.h create 
mode 100644 runtime/class_loader_context_test.cc create mode 100644 runtime/class_loader_utils.h create mode 100644 runtime/class_root.cc create mode 100644 runtime/class_root.h create mode 100644 runtime/class_status.h create mode 100644 runtime/class_table-inl.h create mode 100644 runtime/class_table.cc create mode 100644 runtime/class_table.h create mode 100644 runtime/class_table_test.cc create mode 100644 runtime/common_dex_operations.h create mode 100644 runtime/common_runtime_test.cc create mode 100644 runtime/common_runtime_test.h create mode 100644 runtime/common_throws.cc create mode 100644 runtime/common_throws.h create mode 100644 runtime/compiler_callbacks.h create mode 100644 runtime/compiler_filter.cc create mode 100644 runtime/compiler_filter.h create mode 100644 runtime/compiler_filter_test.cc create mode 100644 runtime/debug_print.cc create mode 100644 runtime/debug_print.h create mode 100644 runtime/debugger.cc create mode 100644 runtime/debugger.h create mode 100644 runtime/deoptimization_kind.h create mode 100644 runtime/dex/dex_file_annotations.cc create mode 100644 runtime/dex/dex_file_annotations.h create mode 100644 runtime/dex2oat_environment_test.h create mode 100644 runtime/dex_reference_collection.h create mode 100644 runtime/dex_register_location.cc create mode 100644 runtime/dex_register_location.h create mode 100644 runtime/dex_to_dex_decompiler.cc create mode 100644 runtime/dex_to_dex_decompiler.h create mode 100644 runtime/dexopt_test.cc create mode 100644 runtime/dexopt_test.h create mode 100644 runtime/elf_file.cc create mode 100644 runtime/elf_file.h create mode 100644 runtime/elf_file_impl.h create mode 100644 runtime/entrypoints/entrypoint_utils-inl.h create mode 100644 runtime/entrypoints/entrypoint_utils.cc create mode 100644 runtime/entrypoints/entrypoint_utils.h create mode 100644 runtime/entrypoints/jni/jni_entrypoints.cc create mode 100644 runtime/entrypoints/jni/jni_entrypoints.h create mode 100644 
runtime/entrypoints/math_entrypoints.cc create mode 100644 runtime/entrypoints/math_entrypoints.h create mode 100644 runtime/entrypoints/math_entrypoints_test.cc create mode 100644 runtime/entrypoints/quick/callee_save_frame.h create mode 100644 runtime/entrypoints/quick/quick_alloc_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_alloc_entrypoints.h create mode 100644 runtime/entrypoints/quick/quick_cast_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_default_externs.h create mode 100644 runtime/entrypoints/quick/quick_default_init_entrypoints.h create mode 100644 runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_dexcache_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_entrypoints.h create mode 100644 runtime/entrypoints/quick/quick_entrypoints_enum.cc create mode 100644 runtime/entrypoints/quick/quick_entrypoints_enum.h create mode 100644 runtime/entrypoints/quick/quick_entrypoints_list.h create mode 100644 runtime/entrypoints/quick/quick_field_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_fillarray_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_jni_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_lock_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_math_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_string_builder_append_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_thread_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_throw_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_trampoline_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc create mode 100644 runtime/entrypoints/runtime_asm_entrypoints.h create mode 100644 runtime/entrypoints_order_test.cc create mode 100644 runtime/exec_utils.cc create mode 100644 runtime/exec_utils.h create mode 100644 
runtime/exec_utils_test.cc create mode 100644 runtime/experimental_flags.h create mode 100644 runtime/fault_handler.cc create mode 100644 runtime/fault_handler.h create mode 100644 runtime/gc/accounting/atomic_stack.h create mode 100644 runtime/gc/accounting/bitmap-inl.h create mode 100644 runtime/gc/accounting/bitmap.cc create mode 100644 runtime/gc/accounting/bitmap.h create mode 100644 runtime/gc/accounting/card_table-inl.h create mode 100644 runtime/gc/accounting/card_table.cc create mode 100644 runtime/gc/accounting/card_table.h create mode 100644 runtime/gc/accounting/card_table_test.cc create mode 100644 runtime/gc/accounting/heap_bitmap-inl.h create mode 100644 runtime/gc/accounting/heap_bitmap.cc create mode 100644 runtime/gc/accounting/heap_bitmap.h create mode 100644 runtime/gc/accounting/mod_union_table-inl.h create mode 100644 runtime/gc/accounting/mod_union_table.cc create mode 100644 runtime/gc/accounting/mod_union_table.h create mode 100644 runtime/gc/accounting/mod_union_table_test.cc create mode 100644 runtime/gc/accounting/read_barrier_table.h create mode 100644 runtime/gc/accounting/remembered_set.cc create mode 100644 runtime/gc/accounting/remembered_set.h create mode 100644 runtime/gc/accounting/space_bitmap-inl.h create mode 100644 runtime/gc/accounting/space_bitmap.cc create mode 100644 runtime/gc/accounting/space_bitmap.h create mode 100644 runtime/gc/accounting/space_bitmap_test.cc create mode 100644 runtime/gc/allocation_listener.h create mode 100644 runtime/gc/allocation_record.cc create mode 100644 runtime/gc/allocation_record.h create mode 100644 runtime/gc/allocator/dlmalloc.cc create mode 100644 runtime/gc/allocator/dlmalloc.h create mode 100644 runtime/gc/allocator/rosalloc-inl.h create mode 100644 runtime/gc/allocator/rosalloc.cc create mode 100644 runtime/gc/allocator/rosalloc.h create mode 100644 runtime/gc/allocator_type.h create mode 100644 runtime/gc/collector/concurrent_copying-inl.h create mode 100644 
runtime/gc/collector/concurrent_copying.cc create mode 100644 runtime/gc/collector/concurrent_copying.h create mode 100644 runtime/gc/collector/garbage_collector.cc create mode 100644 runtime/gc/collector/garbage_collector.h create mode 100644 runtime/gc/collector/gc_type.h create mode 100644 runtime/gc/collector/immune_region.cc create mode 100644 runtime/gc/collector/immune_region.h create mode 100644 runtime/gc/collector/immune_spaces.cc create mode 100644 runtime/gc/collector/immune_spaces.h create mode 100644 runtime/gc/collector/immune_spaces_test.cc create mode 100644 runtime/gc/collector/iteration.h create mode 100644 runtime/gc/collector/mark_sweep-inl.h create mode 100644 runtime/gc/collector/mark_sweep.cc create mode 100644 runtime/gc/collector/mark_sweep.h create mode 100644 runtime/gc/collector/object_byte_pair.h create mode 100644 runtime/gc/collector/partial_mark_sweep.cc create mode 100644 runtime/gc/collector/partial_mark_sweep.h create mode 100644 runtime/gc/collector/semi_space-inl.h create mode 100644 runtime/gc/collector/semi_space.cc create mode 100644 runtime/gc/collector/semi_space.h create mode 100644 runtime/gc/collector/sticky_mark_sweep.cc create mode 100644 runtime/gc/collector/sticky_mark_sweep.h create mode 100644 runtime/gc/collector_type.h create mode 100644 runtime/gc/gc_cause.cc create mode 100644 runtime/gc/gc_cause.h create mode 100644 runtime/gc/gc_pause_listener.h create mode 100644 runtime/gc/heap-inl.h create mode 100644 runtime/gc/heap-visit-objects-inl.h create mode 100644 runtime/gc/heap.cc create mode 100644 runtime/gc/heap.h create mode 100644 runtime/gc/heap_test.cc create mode 100644 runtime/gc/heap_verification_test.cc create mode 100644 runtime/gc/racing_check.h create mode 100644 runtime/gc/reference_processor.cc create mode 100644 runtime/gc/reference_processor.h create mode 100644 runtime/gc/reference_queue.cc create mode 100644 runtime/gc/reference_queue.h create mode 100644 runtime/gc/reference_queue_test.cc 
create mode 100644 runtime/gc/scoped_gc_critical_section.cc create mode 100644 runtime/gc/scoped_gc_critical_section.h create mode 100644 runtime/gc/space/bump_pointer_space-inl.h create mode 100644 runtime/gc/space/bump_pointer_space-walk-inl.h create mode 100644 runtime/gc/space/bump_pointer_space.cc create mode 100644 runtime/gc/space/bump_pointer_space.h create mode 100644 runtime/gc/space/dlmalloc_space-inl.h create mode 100644 runtime/gc/space/dlmalloc_space.cc create mode 100644 runtime/gc/space/dlmalloc_space.h create mode 100644 runtime/gc/space/dlmalloc_space_random_test.cc create mode 100644 runtime/gc/space/dlmalloc_space_static_test.cc create mode 100644 runtime/gc/space/image_space.cc create mode 100644 runtime/gc/space/image_space.h create mode 100644 runtime/gc/space/image_space_fs.h create mode 100644 runtime/gc/space/image_space_loading_order.h create mode 100644 runtime/gc/space/image_space_test.cc create mode 100644 runtime/gc/space/large_object_space.cc create mode 100644 runtime/gc/space/large_object_space.h create mode 100644 runtime/gc/space/large_object_space_test.cc create mode 100644 runtime/gc/space/malloc_space.cc create mode 100644 runtime/gc/space/malloc_space.h create mode 100644 runtime/gc/space/memory_tool_malloc_space-inl.h create mode 100644 runtime/gc/space/memory_tool_malloc_space.h create mode 100644 runtime/gc/space/memory_tool_settings.h create mode 100644 runtime/gc/space/region_space-inl.h create mode 100644 runtime/gc/space/region_space.cc create mode 100644 runtime/gc/space/region_space.h create mode 100644 runtime/gc/space/rosalloc_space-inl.h create mode 100644 runtime/gc/space/rosalloc_space.cc create mode 100644 runtime/gc/space/rosalloc_space.h create mode 100644 runtime/gc/space/rosalloc_space_random_test.cc create mode 100644 runtime/gc/space/rosalloc_space_static_test.cc create mode 100644 runtime/gc/space/space-inl.h create mode 100644 runtime/gc/space/space.cc create mode 100644 runtime/gc/space/space.h create 
mode 100644 runtime/gc/space/space_create_test.cc create mode 100644 runtime/gc/space/space_test.h create mode 100644 runtime/gc/space/zygote_space.cc create mode 100644 runtime/gc/space/zygote_space.h create mode 100644 runtime/gc/system_weak.h create mode 100644 runtime/gc/system_weak_test.cc create mode 100644 runtime/gc/task_processor.cc create mode 100644 runtime/gc/task_processor.h create mode 100644 runtime/gc/task_processor_test.cc create mode 100644 runtime/gc/verification.cc create mode 100644 runtime/gc/verification.h create mode 100644 runtime/gc/weak_root_state.h create mode 100644 runtime/gc_root-inl.h create mode 100644 runtime/gc_root.h create mode 100644 runtime/gtest_test.cc create mode 100644 runtime/handle.h create mode 100644 runtime/handle_scope-inl.h create mode 100644 runtime/handle_scope.h create mode 100644 runtime/handle_scope_test.cc create mode 100644 runtime/handle_wrapper.h create mode 100644 runtime/heap_poisoning.h create mode 100644 runtime/hidden_api.cc create mode 100644 runtime/hidden_api.h create mode 100644 runtime/hidden_api_test.cc create mode 100644 runtime/hprof/hprof.cc create mode 100644 runtime/hprof/hprof.h create mode 100644 runtime/image-inl.h create mode 100644 runtime/image.cc create mode 100644 runtime/image.h create mode 100644 runtime/imt_conflict_table.h create mode 100644 runtime/imtable-inl.h create mode 100644 runtime/imtable.h create mode 100644 runtime/imtable_test.cc create mode 100644 runtime/index_bss_mapping.cc create mode 100644 runtime/index_bss_mapping.h create mode 100644 runtime/indirect_reference_table-inl.h create mode 100644 runtime/indirect_reference_table.cc create mode 100644 runtime/indirect_reference_table.h create mode 100644 runtime/indirect_reference_table_test.cc create mode 100644 runtime/instrumentation.cc create mode 100644 runtime/instrumentation.h create mode 100644 runtime/instrumentation_test.cc create mode 100644 runtime/intern_table-inl.h create mode 100644 
runtime/intern_table.cc create mode 100644 runtime/intern_table.h create mode 100644 runtime/intern_table_test.cc create mode 100644 runtime/interpreter/cfi_asm_support.h create mode 100644 runtime/interpreter/interpreter.cc create mode 100644 runtime/interpreter/interpreter.h create mode 100644 runtime/interpreter/interpreter_cache.cc create mode 100644 runtime/interpreter/interpreter_cache.h create mode 100644 runtime/interpreter/interpreter_common.cc create mode 100644 runtime/interpreter/interpreter_common.h create mode 100644 runtime/interpreter/interpreter_intrinsics.cc create mode 100644 runtime/interpreter/interpreter_intrinsics.h create mode 100644 runtime/interpreter/interpreter_mterp_impl.h create mode 100644 runtime/interpreter/interpreter_switch_impl-inl.h create mode 100644 runtime/interpreter/interpreter_switch_impl.h create mode 100644 runtime/interpreter/interpreter_switch_impl0.cc create mode 100644 runtime/interpreter/interpreter_switch_impl1.cc create mode 100644 runtime/interpreter/interpreter_switch_impl2.cc create mode 100644 runtime/interpreter/interpreter_switch_impl3.cc create mode 100644 runtime/interpreter/lock_count_data.cc create mode 100644 runtime/interpreter/lock_count_data.h create mode 100644 runtime/interpreter/mterp/README.txt create mode 100644 runtime/interpreter/mterp/arm/arithmetic.S create mode 100644 runtime/interpreter/mterp/arm/array.S create mode 100644 runtime/interpreter/mterp/arm/control_flow.S create mode 100644 runtime/interpreter/mterp/arm/floating_point.S create mode 100644 runtime/interpreter/mterp/arm/invoke.S create mode 100644 runtime/interpreter/mterp/arm/main.S create mode 100644 runtime/interpreter/mterp/arm/object.S create mode 100644 runtime/interpreter/mterp/arm/other.S create mode 100644 runtime/interpreter/mterp/arm64/arithmetic.S create mode 100644 runtime/interpreter/mterp/arm64/array.S create mode 100644 runtime/interpreter/mterp/arm64/control_flow.S create mode 100644 
runtime/interpreter/mterp/arm64/floating_point.S create mode 100644 runtime/interpreter/mterp/arm64/invoke.S create mode 100644 runtime/interpreter/mterp/arm64/main.S create mode 100644 runtime/interpreter/mterp/arm64/object.S create mode 100644 runtime/interpreter/mterp/arm64/other.S create mode 100644 runtime/interpreter/mterp/common/gen_setup.py create mode 100755 runtime/interpreter/mterp/gen_mterp.py create mode 100644 runtime/interpreter/mterp/mterp.cc create mode 100644 runtime/interpreter/mterp/mterp.h create mode 100644 runtime/interpreter/mterp/mterp_stub.cc create mode 100644 runtime/interpreter/mterp/nterp.cc create mode 100644 runtime/interpreter/mterp/nterp_stub.cc create mode 100644 runtime/interpreter/mterp/x86/arithmetic.S create mode 100644 runtime/interpreter/mterp/x86/array.S create mode 100644 runtime/interpreter/mterp/x86/control_flow.S create mode 100644 runtime/interpreter/mterp/x86/floating_point.S create mode 100644 runtime/interpreter/mterp/x86/invoke.S create mode 100644 runtime/interpreter/mterp/x86/main.S create mode 100644 runtime/interpreter/mterp/x86/object.S create mode 100644 runtime/interpreter/mterp/x86/other.S create mode 100644 runtime/interpreter/mterp/x86_64/arithmetic.S create mode 100644 runtime/interpreter/mterp/x86_64/array.S create mode 100644 runtime/interpreter/mterp/x86_64/control_flow.S create mode 100644 runtime/interpreter/mterp/x86_64/floating_point.S create mode 100644 runtime/interpreter/mterp/x86_64/invoke.S create mode 100644 runtime/interpreter/mterp/x86_64/main.S create mode 100644 runtime/interpreter/mterp/x86_64/object.S create mode 100644 runtime/interpreter/mterp/x86_64/other.S create mode 100644 runtime/interpreter/mterp/x86_64ng/array.S create mode 100644 runtime/interpreter/mterp/x86_64ng/control_flow.S create mode 100644 runtime/interpreter/mterp/x86_64ng/invoke.S create mode 100644 runtime/interpreter/mterp/x86_64ng/main.S create mode 100644 runtime/interpreter/mterp/x86_64ng/object.S create mode 
100644 runtime/interpreter/mterp/x86_64ng/other.S create mode 100644 runtime/interpreter/safe_math.h create mode 100644 runtime/interpreter/safe_math_test.cc create mode 100644 runtime/interpreter/shadow_frame-inl.h create mode 100644 runtime/interpreter/shadow_frame.cc create mode 100644 runtime/interpreter/shadow_frame.h create mode 100644 runtime/interpreter/unstarted_runtime.cc create mode 100644 runtime/interpreter/unstarted_runtime.h create mode 100644 runtime/interpreter/unstarted_runtime_list.h create mode 100644 runtime/interpreter/unstarted_runtime_test.cc create mode 100644 runtime/intrinsics_enum.h create mode 100644 runtime/intrinsics_list.h create mode 100644 runtime/java_frame_root_info.cc create mode 100644 runtime/java_frame_root_info.h create mode 100644 runtime/jdwp_provider.h create mode 100644 runtime/jit/TEST_MAPPING create mode 100644 runtime/jit/debugger_interface.cc create mode 100644 runtime/jit/debugger_interface.h create mode 100644 runtime/jit/jit-inl.h create mode 100644 runtime/jit/jit.cc create mode 100644 runtime/jit/jit.h create mode 100644 runtime/jit/jit_code_cache.cc create mode 100644 runtime/jit/jit_code_cache.h create mode 100644 runtime/jit/jit_memory_region.cc create mode 100644 runtime/jit/jit_memory_region.h create mode 100644 runtime/jit/jit_memory_region_test.cc create mode 100644 runtime/jit/jit_scoped_code_cache_write.h create mode 100644 runtime/jit/profile_saver.cc create mode 100644 runtime/jit/profile_saver.h create mode 100644 runtime/jit/profile_saver_options.h create mode 100644 runtime/jit/profile_saver_test.cc create mode 100644 runtime/jit/profiling_info.cc create mode 100644 runtime/jit/profiling_info.h create mode 100644 runtime/jit/profiling_info_test.cc create mode 100644 runtime/jni/check_jni.cc create mode 100644 runtime/jni/check_jni.h create mode 100644 runtime/jni/java_vm_ext.cc create mode 100644 runtime/jni/java_vm_ext.h create mode 100644 runtime/jni/java_vm_ext_test.cc create mode 100644 
runtime/jni/jni_env_ext-inl.h create mode 100644 runtime/jni/jni_env_ext.cc create mode 100644 runtime/jni/jni_env_ext.h create mode 100644 runtime/jni/jni_id_manager.cc create mode 100644 runtime/jni/jni_id_manager.h create mode 100644 runtime/jni/jni_internal.cc create mode 100644 runtime/jni/jni_internal.h create mode 100644 runtime/jni/jni_internal_test.cc create mode 100644 runtime/jni_id_type.h create mode 100644 runtime/jvalue-inl.h create mode 100644 runtime/jvalue.h create mode 100644 runtime/linear_alloc.cc create mode 100644 runtime/linear_alloc.h create mode 100644 runtime/lock_word-inl.h create mode 100644 runtime/lock_word.h create mode 100644 runtime/managed_stack-inl.h create mode 100644 runtime/managed_stack.cc create mode 100644 runtime/managed_stack.h create mode 100644 runtime/mapping_table.h create mode 100644 runtime/method_handles-inl.h create mode 100644 runtime/method_handles.cc create mode 100644 runtime/method_handles.h create mode 100644 runtime/method_handles_test.cc create mode 100644 runtime/mirror/accessible_object.h create mode 100644 runtime/mirror/array-alloc-inl.h create mode 100644 runtime/mirror/array-inl.h create mode 100644 runtime/mirror/array.cc create mode 100644 runtime/mirror/array.h create mode 100644 runtime/mirror/call_site-inl.h create mode 100644 runtime/mirror/call_site.h create mode 100644 runtime/mirror/class-alloc-inl.h create mode 100644 runtime/mirror/class-inl.h create mode 100644 runtime/mirror/class-refvisitor-inl.h create mode 100644 runtime/mirror/class.cc create mode 100644 runtime/mirror/class.h create mode 100644 runtime/mirror/class_ext-inl.h create mode 100644 runtime/mirror/class_ext.cc create mode 100644 runtime/mirror/class_ext.h create mode 100644 runtime/mirror/class_flags.h create mode 100644 runtime/mirror/class_loader-inl.h create mode 100644 runtime/mirror/class_loader.h create mode 100644 runtime/mirror/dex_cache-inl.h create mode 100644 runtime/mirror/dex_cache.cc create mode 100644 
runtime/mirror/dex_cache.h create mode 100644 runtime/mirror/dex_cache_test.cc create mode 100644 runtime/mirror/emulated_stack_frame-inl.h create mode 100644 runtime/mirror/emulated_stack_frame.cc create mode 100644 runtime/mirror/emulated_stack_frame.h create mode 100644 runtime/mirror/executable-inl.h create mode 100644 runtime/mirror/executable.cc create mode 100644 runtime/mirror/executable.h create mode 100644 runtime/mirror/field-inl.h create mode 100644 runtime/mirror/field.cc create mode 100644 runtime/mirror/field.h create mode 100644 runtime/mirror/iftable-inl.h create mode 100644 runtime/mirror/iftable.h create mode 100644 runtime/mirror/method.cc create mode 100644 runtime/mirror/method.h create mode 100644 runtime/mirror/method_handle_impl-inl.h create mode 100644 runtime/mirror/method_handle_impl.cc create mode 100644 runtime/mirror/method_handle_impl.h create mode 100644 runtime/mirror/method_handles_lookup.cc create mode 100644 runtime/mirror/method_handles_lookup.h create mode 100644 runtime/mirror/method_type-inl.h create mode 100644 runtime/mirror/method_type.cc create mode 100644 runtime/mirror/method_type.h create mode 100644 runtime/mirror/method_type_test.cc create mode 100644 runtime/mirror/object-inl.h create mode 100644 runtime/mirror/object-readbarrier-inl.h create mode 100644 runtime/mirror/object-refvisitor-inl.h create mode 100644 runtime/mirror/object.cc create mode 100644 runtime/mirror/object.h create mode 100644 runtime/mirror/object_array-alloc-inl.h create mode 100644 runtime/mirror/object_array-inl.h create mode 100644 runtime/mirror/object_array.h create mode 100644 runtime/mirror/object_reference-inl.h create mode 100644 runtime/mirror/object_reference.h create mode 100644 runtime/mirror/object_test.cc create mode 100644 runtime/mirror/proxy.h create mode 100644 runtime/mirror/reference-inl.h create mode 100644 runtime/mirror/reference.h create mode 100644 runtime/mirror/stack_trace_element-inl.h create mode 100644 
runtime/mirror/stack_trace_element.cc create mode 100644 runtime/mirror/stack_trace_element.h create mode 100644 runtime/mirror/string-alloc-inl.h create mode 100644 runtime/mirror/string-inl.h create mode 100644 runtime/mirror/string.cc create mode 100644 runtime/mirror/string.h create mode 100644 runtime/mirror/throwable.cc create mode 100644 runtime/mirror/throwable.h create mode 100644 runtime/mirror/var_handle.cc create mode 100644 runtime/mirror/var_handle.h create mode 100644 runtime/mirror/var_handle_test.cc create mode 100644 runtime/module_exclusion_test.cc create mode 100644 runtime/monitor-inl.h create mode 100644 runtime/monitor.cc create mode 100644 runtime/monitor.h create mode 100644 runtime/monitor_android.cc create mode 100644 runtime/monitor_linux.cc create mode 100644 runtime/monitor_objects_stack_visitor.cc create mode 100644 runtime/monitor_objects_stack_visitor.h create mode 100644 runtime/monitor_pool.cc create mode 100644 runtime/monitor_pool.h create mode 100644 runtime/monitor_pool_test.cc create mode 100644 runtime/monitor_test.cc create mode 100644 runtime/native/dalvik_system_BaseDexClassLoader.cc create mode 100644 runtime/native/dalvik_system_BaseDexClassLoader.h create mode 100644 runtime/native/dalvik_system_DexFile.cc create mode 100644 runtime/native/dalvik_system_DexFile.h create mode 100644 runtime/native/dalvik_system_VMDebug.cc create mode 100644 runtime/native/dalvik_system_VMDebug.h create mode 100644 runtime/native/dalvik_system_VMRuntime.cc create mode 100644 runtime/native/dalvik_system_VMRuntime.h create mode 100644 runtime/native/dalvik_system_VMStack.cc create mode 100644 runtime/native/dalvik_system_VMStack.h create mode 100644 runtime/native/dalvik_system_ZygoteHooks.cc create mode 100644 runtime/native/dalvik_system_ZygoteHooks.h create mode 100644 runtime/native/java_lang_Class.cc create mode 100644 runtime/native/java_lang_Class.h create mode 100644 runtime/native/java_lang_Object.cc create mode 100644 
runtime/native/java_lang_Object.h create mode 100644 runtime/native/java_lang_String.cc create mode 100644 runtime/native/java_lang_String.h create mode 100644 runtime/native/java_lang_StringFactory.cc create mode 100644 runtime/native/java_lang_StringFactory.h create mode 100644 runtime/native/java_lang_System.cc create mode 100644 runtime/native/java_lang_System.h create mode 100644 runtime/native/java_lang_Thread.cc create mode 100644 runtime/native/java_lang_Thread.h create mode 100644 runtime/native/java_lang_Throwable.cc create mode 100644 runtime/native/java_lang_Throwable.h create mode 100644 runtime/native/java_lang_VMClassLoader.cc create mode 100644 runtime/native/java_lang_VMClassLoader.h create mode 100644 runtime/native/java_lang_invoke_MethodHandleImpl.cc create mode 100644 runtime/native/java_lang_invoke_MethodHandleImpl.h create mode 100644 runtime/native/java_lang_ref_FinalizerReference.cc create mode 100644 runtime/native/java_lang_ref_FinalizerReference.h create mode 100644 runtime/native/java_lang_ref_Reference.cc create mode 100644 runtime/native/java_lang_ref_Reference.h create mode 100644 runtime/native/java_lang_reflect_Array.cc create mode 100644 runtime/native/java_lang_reflect_Array.h create mode 100644 runtime/native/java_lang_reflect_Constructor.cc create mode 100644 runtime/native/java_lang_reflect_Constructor.h create mode 100644 runtime/native/java_lang_reflect_Executable.cc create mode 100644 runtime/native/java_lang_reflect_Executable.h create mode 100644 runtime/native/java_lang_reflect_Field.cc create mode 100644 runtime/native/java_lang_reflect_Field.h create mode 100644 runtime/native/java_lang_reflect_Method.cc create mode 100644 runtime/native/java_lang_reflect_Method.h create mode 100644 runtime/native/java_lang_reflect_Parameter.cc create mode 100644 runtime/native/java_lang_reflect_Parameter.h create mode 100644 runtime/native/java_lang_reflect_Proxy.cc create mode 100644 runtime/native/java_lang_reflect_Proxy.h create 
mode 100644 runtime/native/java_util_concurrent_atomic_AtomicLong.cc create mode 100644 runtime/native/java_util_concurrent_atomic_AtomicLong.h create mode 100644 runtime/native/libcore_util_CharsetUtils.cc create mode 100644 runtime/native/libcore_util_CharsetUtils.h create mode 100644 runtime/native/native_util.h create mode 100644 runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc create mode 100644 runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h create mode 100644 runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc create mode 100644 runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h create mode 100644 runtime/native/scoped_fast_native_object_access-inl.h create mode 100644 runtime/native/scoped_fast_native_object_access.h create mode 100644 runtime/native/sun_misc_Unsafe.cc create mode 100644 runtime/native/sun_misc_Unsafe.h create mode 100644 runtime/native_bridge_art_interface.cc create mode 100644 runtime/native_bridge_art_interface.h create mode 100644 runtime/native_stack_dump.cc create mode 100644 runtime/native_stack_dump.h create mode 100644 runtime/non_debuggable_classes.cc create mode 100644 runtime/non_debuggable_classes.h create mode 100644 runtime/noop_compiler_callbacks.h create mode 100644 runtime/nterp_helpers.cc create mode 100644 runtime/nterp_helpers.h create mode 100644 runtime/nth_caller_visitor.h create mode 100644 runtime/oat.cc create mode 100644 runtime/oat.h create mode 100644 runtime/oat_file-inl.h create mode 100644 runtime/oat_file.cc create mode 100644 runtime/oat_file.h create mode 100644 runtime/oat_file_assistant.cc create mode 100644 runtime/oat_file_assistant.h create mode 100644 runtime/oat_file_assistant_test.cc create mode 100644 runtime/oat_file_manager.cc create mode 100644 runtime/oat_file_manager.h create mode 100644 runtime/oat_file_test.cc create mode 100644 runtime/oat_quick_method_header.cc create mode 100644 runtime/oat_quick_method_header.h create mode 100644 
runtime/obj_ptr-inl.h create mode 100644 runtime/obj_ptr.h create mode 100644 runtime/object_callbacks.h create mode 100644 runtime/object_lock.cc create mode 100644 runtime/object_lock.h create mode 100644 runtime/offsets.cc create mode 100644 runtime/offsets.h create mode 100644 runtime/parsed_options.cc create mode 100644 runtime/parsed_options.h create mode 100644 runtime/parsed_options_test.cc create mode 100644 runtime/plugin.cc create mode 100644 runtime/plugin.h create mode 100644 runtime/prebuilt_tools_test.cc create mode 100644 runtime/process_state.h create mode 100644 runtime/proxy_test.cc create mode 100644 runtime/proxy_test.h create mode 100644 runtime/quick/quick_method_frame_info.h create mode 100644 runtime/quick_exception_handler.cc create mode 100644 runtime/quick_exception_handler.h create mode 100644 runtime/quicken_info.h create mode 100644 runtime/read_barrier-inl.h create mode 100644 runtime/read_barrier.cc create mode 100644 runtime/read_barrier.h create mode 100644 runtime/read_barrier_config.h create mode 100644 runtime/read_barrier_option.h create mode 100644 runtime/reference_table.cc create mode 100644 runtime/reference_table.h create mode 100644 runtime/reference_table_test.cc create mode 100644 runtime/reflection-inl.h create mode 100644 runtime/reflection.cc create mode 100644 runtime/reflection.h create mode 100644 runtime/reflection_test.cc create mode 100644 runtime/reflective_handle.h create mode 100644 runtime/reflective_handle_scope-inl.h create mode 100644 runtime/reflective_handle_scope.cc create mode 100644 runtime/reflective_handle_scope.h create mode 100644 runtime/reflective_reference.h create mode 100644 runtime/reflective_value_visitor.cc create mode 100644 runtime/reflective_value_visitor.h create mode 100644 runtime/runtime-inl.h create mode 100644 runtime/runtime.cc create mode 100644 runtime/runtime.h create mode 100644 runtime/runtime_android.cc create mode 100644 runtime/runtime_callbacks.cc create mode 100644 
runtime/runtime_callbacks.h create mode 100644 runtime/runtime_callbacks_test.cc create mode 100644 runtime/runtime_common.cc create mode 100644 runtime/runtime_common.h create mode 100644 runtime/runtime_globals.h create mode 100644 runtime/runtime_intrinsics.cc create mode 100644 runtime/runtime_intrinsics.h create mode 100644 runtime/runtime_linux.cc create mode 100644 runtime/runtime_options.cc create mode 100644 runtime/runtime_options.def create mode 100644 runtime/runtime_options.h create mode 100644 runtime/runtime_stats.h create mode 100644 runtime/runtime_test.cc create mode 100644 runtime/scoped_thread_state_change-inl.h create mode 100644 runtime/scoped_thread_state_change.cc create mode 100644 runtime/scoped_thread_state_change.h create mode 100644 runtime/signal_catcher.cc create mode 100644 runtime/signal_catcher.h create mode 100644 runtime/signal_set.h create mode 100644 runtime/stack.cc create mode 100644 runtime/stack.h create mode 100644 runtime/stack_map.cc create mode 100644 runtime/stack_map.h create mode 100644 runtime/stack_reference.h create mode 100644 runtime/string_builder_append.cc create mode 100644 runtime/string_builder_append.h create mode 100644 runtime/subtype_check.h create mode 100644 runtime/subtype_check_bits.h create mode 100644 runtime/subtype_check_bits_and_status.h create mode 100644 runtime/subtype_check_info.h create mode 100644 runtime/subtype_check_info_test.cc create mode 100644 runtime/subtype_check_test.cc create mode 100644 runtime/suspend_reason.h create mode 100644 runtime/thread-current-inl.h create mode 100644 runtime/thread-inl.h create mode 100644 runtime/thread.cc create mode 100644 runtime/thread.h create mode 100644 runtime/thread_android.cc create mode 100644 runtime/thread_linux.cc create mode 100644 runtime/thread_list.cc create mode 100644 runtime/thread_list.h create mode 100644 runtime/thread_pool.cc create mode 100644 runtime/thread_pool.h create mode 100644 runtime/thread_pool_test.cc create mode 
100644 runtime/thread_state.h create mode 100644 runtime/ti/agent.cc create mode 100644 runtime/ti/agent.h create mode 100644 runtime/trace.cc create mode 100644 runtime/trace.h create mode 100644 runtime/transaction.cc create mode 100644 runtime/transaction.h create mode 100644 runtime/transaction_test.cc create mode 100644 runtime/two_runtimes_test.cc create mode 100644 runtime/utils/dex_cache_arrays_layout-inl.h create mode 100644 runtime/utils/dex_cache_arrays_layout.h create mode 100644 runtime/var_handles.cc create mode 100644 runtime/var_handles.h create mode 100644 runtime/vdex_file.cc create mode 100644 runtime/vdex_file.h create mode 100644 runtime/vdex_file_test.cc create mode 100644 runtime/verifier/class_verifier.cc create mode 100644 runtime/verifier/class_verifier.h create mode 100644 runtime/verifier/instruction_flags.cc create mode 100644 runtime/verifier/instruction_flags.h create mode 100644 runtime/verifier/method_verifier-inl.h create mode 100644 runtime/verifier/method_verifier.cc create mode 100644 runtime/verifier/method_verifier.h create mode 100644 runtime/verifier/method_verifier_test.cc create mode 100644 runtime/verifier/reg_type-inl.h create mode 100644 runtime/verifier/reg_type.cc create mode 100644 runtime/verifier/reg_type.h create mode 100644 runtime/verifier/reg_type_cache-inl.h create mode 100644 runtime/verifier/reg_type_cache.cc create mode 100644 runtime/verifier/reg_type_cache.h create mode 100644 runtime/verifier/reg_type_test.cc create mode 100644 runtime/verifier/register_line-inl.h create mode 100644 runtime/verifier/register_line.cc create mode 100644 runtime/verifier/register_line.h create mode 100644 runtime/verifier/scoped_newline.h create mode 100644 runtime/verifier/verifier_compiler_binding.h create mode 100644 runtime/verifier/verifier_deps.cc create mode 100644 runtime/verifier/verifier_deps.h create mode 100644 runtime/verifier/verifier_enums.h create mode 100644 runtime/verify_object-inl.h create mode 100644 
runtime/verify_object.cc create mode 100644 runtime/verify_object.h create mode 100644 runtime/well_known_classes.cc create mode 100644 runtime/well_known_classes.h create mode 100644 runtime/write_barrier-inl.h create mode 100644 runtime/write_barrier.h create mode 100644 sigchainlib/Android.bp create mode 100644 sigchainlib/OWNERS create mode 100644 sigchainlib/log.h create mode 100644 sigchainlib/sigchain.cc create mode 100644 sigchainlib/sigchain.h create mode 100644 sigchainlib/sigchain_dummy.cc create mode 100644 sigchainlib/sigchain_test.cc create mode 100644 sigchainlib/version-script32.txt create mode 100644 sigchainlib/version-script64.txt create mode 100644 simulator/Android.bp create mode 100644 simulator/code_simulator.cc create mode 100644 simulator/code_simulator_arm64.cc create mode 100644 simulator/code_simulator_arm64.h create mode 100644 simulator/code_simulator_container.cc create mode 100644 simulator/code_simulator_container.h create mode 100644 simulator/include/code_simulator.h create mode 100755 test.py create mode 100644 test/000-nop/build create mode 100644 test/000-nop/expected.txt create mode 100644 test/000-nop/info.txt create mode 100644 test/000-nop/run create mode 100644 test/001-HelloWorld/expected.txt create mode 100644 test/001-HelloWorld/info.txt create mode 100644 test/001-HelloWorld/src/Main.java create mode 100644 test/001-Main/expected.txt create mode 100644 test/001-Main/info.txt create mode 100644 test/001-Main/src/Main.java create mode 100644 test/002-sleep/expected.txt create mode 100644 test/002-sleep/info.txt create mode 100644 test/002-sleep/src/Main.java create mode 100644 test/003-omnibus-opcodes/build create mode 100644 test/003-omnibus-opcodes/expected.txt create mode 100644 test/003-omnibus-opcodes/info.txt create mode 100644 test/003-omnibus-opcodes/src/Array.java create mode 100644 test/003-omnibus-opcodes/src/Classes.java create mode 100644 test/003-omnibus-opcodes/src/Compare.java create mode 100644 
test/003-omnibus-opcodes/src/FloatMath.java create mode 100644 test/003-omnibus-opcodes/src/GenSelect.java create mode 100644 test/003-omnibus-opcodes/src/Goto.java create mode 100644 test/003-omnibus-opcodes/src/InstField.java create mode 100644 test/003-omnibus-opcodes/src/IntMath.java create mode 100644 test/003-omnibus-opcodes/src/InternedString.java create mode 100644 test/003-omnibus-opcodes/src/Main.java create mode 100644 test/003-omnibus-opcodes/src/MethodCall.java create mode 100644 test/003-omnibus-opcodes/src/Monitor.java create mode 100644 test/003-omnibus-opcodes/src/StaticField.java create mode 100644 test/003-omnibus-opcodes/src/Switch.java create mode 100644 test/003-omnibus-opcodes/src/Throw.java create mode 100644 test/003-omnibus-opcodes/src/UnresClass.java create mode 100644 test/003-omnibus-opcodes/src/UnresClassSubclass.java create mode 100644 test/003-omnibus-opcodes/src/UnresStuff.java create mode 100644 test/003-omnibus-opcodes/src/UnresTest1.java create mode 100644 test/003-omnibus-opcodes/src/UnresTest2.java create mode 100644 test/003-omnibus-opcodes/src2/UnresStuff.java create mode 100644 test/004-InterfaceTest/expected.txt create mode 100644 test/004-InterfaceTest/info.txt create mode 100644 test/004-InterfaceTest/src/Main.java create mode 100755 test/004-JniTest/build create mode 100644 test/004-JniTest/expected.txt create mode 100644 test/004-JniTest/info.txt create mode 100644 test/004-JniTest/jni_test.cc create mode 100644 test/004-JniTest/smali/AbstractInterface.smali create mode 100644 test/004-JniTest/smali/ConcreteClass.smali create mode 100644 test/004-JniTest/smali/ConflictInterface.smali create mode 100644 test/004-JniTest/smali/DefaultInterface.smali create mode 100644 test/004-JniTest/src-ex/A.java create mode 100644 test/004-JniTest/src/Main.java create mode 100644 test/004-JniTest/src/dalvik/annotation/optimization/CriticalNative.java create mode 100644 test/004-JniTest/src/dalvik/annotation/optimization/FastNative.java 
create mode 100644 test/004-NativeAllocations/expected.txt create mode 100644 test/004-NativeAllocations/info.txt create mode 100644 test/004-NativeAllocations/src-art/Main.java create mode 100644 test/004-ReferenceMap/build create mode 100644 test/004-ReferenceMap/classes.dex create mode 100644 test/004-ReferenceMap/expected.txt create mode 100644 test/004-ReferenceMap/info.txt create mode 100644 test/004-ReferenceMap/src/Main.java create mode 100644 test/004-ReferenceMap/stack_walk_refmap_jni.cc create mode 100644 test/004-SignalTest/expected.txt create mode 100644 test/004-SignalTest/info.txt create mode 100644 test/004-SignalTest/signaltest.cc create mode 100644 test/004-SignalTest/src/Main.java create mode 100644 test/004-StackWalk/build create mode 100644 test/004-StackWalk/classes.dex create mode 100644 test/004-StackWalk/expected.txt create mode 100644 test/004-StackWalk/info.txt create mode 100644 test/004-StackWalk/src/Main.java create mode 100644 test/004-StackWalk/stack_walk_jni.cc create mode 100755 test/004-ThreadStress/check create mode 100644 test/004-ThreadStress/expected.txt create mode 100644 test/004-ThreadStress/info.txt create mode 100755 test/004-ThreadStress/run create mode 100644 test/004-ThreadStress/src-art/Main.java create mode 100644 test/004-ThreadStress/thread_stress.cc create mode 100644 test/004-UnsafeTest/expected.txt create mode 100644 test/004-UnsafeTest/info.txt create mode 100644 test/004-UnsafeTest/src/Main.java create mode 100644 test/004-UnsafeTest/unsafe_test.cc create mode 100644 test/004-checker-UnsafeTest18/expected.txt create mode 100644 test/004-checker-UnsafeTest18/info.txt create mode 100644 test/004-checker-UnsafeTest18/src/Main.java create mode 100644 test/005-annotations/build create mode 100644 test/005-annotations/expected.txt create mode 100644 test/005-annotations/info.txt create mode 100644 test/005-annotations/src/Main.java create mode 100644 test/005-annotations/src/android/test/AnnoSimplePackage1.java 
create mode 100644 test/005-annotations/src/android/test/anno/AnnoArrayField.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoFancyConstructor.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoFancyField.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoFancyMethod.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoFancyParameter.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoFancyType.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoMissingClass.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoRenamedEnumMethod.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleConstructor.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleField.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleLocalVariable.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleMethod.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimplePackage.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleParameter.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleType.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleType2.java create mode 100644 test/005-annotations/src/android/test/anno/AnnoSimpleTypeInvis.java create mode 100644 test/005-annotations/src/android/test/anno/ClassWithInnerAnnotationClass.java create mode 100644 test/005-annotations/src/android/test/anno/ClassWithInnerClasses.java create mode 100644 test/005-annotations/src/android/test/anno/ClassWithMissingAnnotation.java create mode 100644 test/005-annotations/src/android/test/anno/ExportedProperty.java create mode 100644 test/005-annotations/src/android/test/anno/FullyNoted.java create mode 100644 test/005-annotations/src/android/test/anno/INoted.java create mode 100644 
test/005-annotations/src/android/test/anno/IntToString.java create mode 100644 test/005-annotations/src/android/test/anno/MissingAnnotation.java create mode 100644 test/005-annotations/src/android/test/anno/RenamedEnumClass.java create mode 100644 test/005-annotations/src/android/test/anno/RenamedNoted.java create mode 100644 test/005-annotations/src/android/test/anno/SimplyNoted.java create mode 100644 test/005-annotations/src/android/test/anno/SomeClass.java create mode 100644 test/005-annotations/src/android/test/anno/SubNoted.java create mode 100644 test/005-annotations/src/android/test/anno/TestAnnotations.java create mode 100644 test/005-annotations/src/android/test/anno/package-info.java create mode 100644 test/005-annotations/src/android/test/package-info.java create mode 100644 test/005-annotations/src2/android/test/anno/RenamedEnumClass.java create mode 100644 test/006-args/expected.txt create mode 100644 test/006-args/info.txt create mode 100644 test/006-args/src/ArgsTest.java create mode 100644 test/006-args/src/Main.java create mode 100644 test/007-count10/expected.txt create mode 100644 test/007-count10/info.txt create mode 100644 test/007-count10/src/Main.java create mode 100644 test/008-exceptions/expected.txt create mode 100644 test/008-exceptions/info.txt create mode 100644 test/008-exceptions/src-multidex/MultiDexBadInitWrapper2.java create mode 100644 test/008-exceptions/src/Main.java create mode 100644 test/008-exceptions/src/MultiDexBadInit.java create mode 100644 test/008-exceptions/src/MultiDexBadInitWrapper1.java create mode 100644 test/009-instanceof/expected.txt create mode 100644 test/009-instanceof/info.txt create mode 100644 test/009-instanceof/src/Iface1.java create mode 100644 test/009-instanceof/src/Iface2.java create mode 100644 test/009-instanceof/src/Iface2Sub1.java create mode 100644 test/009-instanceof/src/ImplA.java create mode 100644 test/009-instanceof/src/ImplB.java create mode 100644 test/009-instanceof/src/ImplBSub.java 
create mode 100644 test/009-instanceof/src/Main.java create mode 100644 test/010-instance/expected.txt create mode 100644 test/010-instance/info.txt create mode 100644 test/010-instance/src/InstanceTest.java create mode 100644 test/010-instance/src/Main.java create mode 100644 test/010-instance/src/X.java create mode 100644 test/010-instance/src/Y.java create mode 100644 test/011-array-copy/expected.txt create mode 100644 test/011-array-copy/info.txt create mode 100644 test/011-array-copy/src/Iface1.java create mode 100644 test/011-array-copy/src/Iface2.java create mode 100644 test/011-array-copy/src/ImplA.java create mode 100644 test/011-array-copy/src/Main.java create mode 100644 test/012-math/expected.txt create mode 100644 test/012-math/info.txt create mode 100644 test/012-math/src/Main.java create mode 100644 test/013-math2/expected.txt create mode 100644 test/013-math2/info.txt create mode 100644 test/013-math2/src/Main.java create mode 100644 test/014-math3/expected.txt create mode 100644 test/014-math3/info.txt create mode 100644 test/014-math3/src/Main.java create mode 100644 test/015-switch/expected.txt create mode 100644 test/015-switch/info.txt create mode 100644 test/015-switch/src/Main.java create mode 100644 test/016-intern/expected.txt create mode 100644 test/016-intern/info.txt create mode 100644 test/016-intern/src/Main.java create mode 100644 test/017-float/expected.txt create mode 100644 test/017-float/info.txt create mode 100644 test/017-float/src/Main.java create mode 100644 test/018-stack-overflow/expected.txt create mode 100644 test/018-stack-overflow/info.txt create mode 100644 test/018-stack-overflow/src/Main.java create mode 100644 test/019-wrong-array-type/expected.txt create mode 100644 test/019-wrong-array-type/info.txt create mode 100644 test/019-wrong-array-type/src/Main.java create mode 100644 test/020-string/expected.txt create mode 100644 test/020-string/info.txt create mode 100644 test/020-string/src/Main.java create mode 100644 
test/021-string2/expected.txt create mode 100644 test/021-string2/info.txt create mode 100644 test/021-string2/src/Main.java create mode 100644 test/021-string2/src/junit/framework/Assert.java create mode 100644 test/021-string2/src/junit/framework/AssertionFailedError.java create mode 100644 test/021-string2/src/junit/framework/ComparisonCompactor.java create mode 100644 test/021-string2/src/junit/framework/ComparisonFailure.java create mode 100644 test/022-interface/classes/Iface1.class create mode 100644 test/022-interface/classes/Iface2.class create mode 100644 test/022-interface/classes/Iface2Sub1.class create mode 100644 test/022-interface/classes/ImplA.class create mode 100644 test/022-interface/classes/ImplB.class create mode 100644 test/022-interface/classes/ImplBSub.class create mode 100644 test/022-interface/classes/Main$1.class create mode 100644 test/022-interface/classes/Main$SubInterface.class create mode 100644 test/022-interface/classes/Main$SubInterfaceImpl.class create mode 100644 test/022-interface/classes/Main.class create mode 100644 test/022-interface/classes/ObjectOverridingInterface.class create mode 100644 test/022-interface/classes/SubObjectOverridingInterface.class create mode 100644 test/022-interface/expected.txt create mode 100644 test/022-interface/info.txt create mode 100644 test/022-interface/src/Iface1.java create mode 100644 test/022-interface/src/Iface2.java create mode 100644 test/022-interface/src/Iface2Sub1.java create mode 100644 test/022-interface/src/ImplA.java create mode 100644 test/022-interface/src/ImplB.java create mode 100644 test/022-interface/src/ImplBSub.java create mode 100644 test/022-interface/src/Main.java create mode 100644 test/022-interface/src/ObjectOverridingInterface.java create mode 100644 test/022-interface/src/SubObjectOverridingInterface.java create mode 100644 test/023-many-interfaces/build create mode 100644 test/023-many-interfaces/expected.txt create mode 100644 
test/023-many-interfaces/iface-gen.c create mode 100644 test/023-many-interfaces/info.txt create mode 100644 test/023-many-interfaces/src/Main.java create mode 100644 test/023-many-interfaces/src/ManyInterfaces.java create mode 100644 test/024-illegal-access/expected.txt create mode 100644 test/024-illegal-access/info.txt create mode 100644 test/024-illegal-access/src/CheckInstanceof.java create mode 100644 test/024-illegal-access/src/Main.java create mode 100644 test/024-illegal-access/src/PublicAccess.java create mode 100644 test/024-illegal-access/src/SemiPrivate.java create mode 100644 test/024-illegal-access/src/otherpkg/Package.java create mode 100644 test/024-illegal-access/src2/SemiPrivate.java create mode 100644 test/024-illegal-access/src2/otherpkg/Package.java create mode 100644 test/025-access-controller/expected.txt create mode 100644 test/025-access-controller/info.txt create mode 100644 test/025-access-controller/src/Main.java create mode 100644 test/025-access-controller/src/Privvy.java create mode 100644 test/026-access/expected.txt create mode 100644 test/026-access/info.txt create mode 100644 test/026-access/src/Iface.java create mode 100644 test/026-access/src/Iface2.java create mode 100644 test/026-access/src/Main.java create mode 100644 test/026-access/src/Unrelated.java create mode 100644 test/026-access/src/otherpackage/PublicAccess.java create mode 100644 test/027-arithmetic/expected.txt create mode 100644 test/027-arithmetic/info.txt create mode 100644 test/027-arithmetic/src/Main.java create mode 100644 test/028-array-write/expected.txt create mode 100644 test/028-array-write/info.txt create mode 100644 test/028-array-write/src/Main.java create mode 100644 test/029-assert/expected.txt create mode 100644 test/029-assert/info.txt create mode 100644 test/029-assert/src/Main.java create mode 100644 test/030-bad-finalizer/expected.txt create mode 100644 test/030-bad-finalizer/info.txt create mode 100755 test/030-bad-finalizer/run create mode 
100644 test/030-bad-finalizer/src/Main.java create mode 100644 test/031-class-attributes/expected.txt create mode 100644 test/031-class-attributes/info.txt create mode 100644 test/031-class-attributes/jasmin/ClassAttrs$1.j create mode 100644 test/031-class-attributes/src/ClassAttrs.java create mode 100644 test/031-class-attributes/src/FancyClass.java create mode 100644 test/031-class-attributes/src/Main.java create mode 100644 test/031-class-attributes/src/OtherClass.java create mode 100644 test/031-class-attributes/src/otherpackage/OtherPackageClass.java create mode 100644 test/032-concrete-sub/expected.txt create mode 100644 test/032-concrete-sub/info.txt create mode 100644 test/032-concrete-sub/src/AbstractBase.java create mode 100644 test/032-concrete-sub/src/ConcreteSub.java create mode 100644 test/032-concrete-sub/src/ConcreteSub2.java create mode 100644 test/032-concrete-sub/src/Main.java create mode 100644 test/032-concrete-sub/src2/AbstractBase.java create mode 100644 test/033-class-init-deadlock/expected.txt create mode 100644 test/033-class-init-deadlock/info.txt create mode 100644 test/033-class-init-deadlock/src/Main.java create mode 100644 test/034-call-null/expected.txt create mode 100644 test/034-call-null/info.txt create mode 100755 test/034-call-null/run create mode 100644 test/034-call-null/src/Main.java create mode 100644 test/035-enum/expected.txt create mode 100644 test/035-enum/info.txt create mode 100644 test/035-enum/src/Main.java create mode 100644 test/036-finalizer/expected.txt create mode 100644 test/036-finalizer/info.txt create mode 100644 test/036-finalizer/src/Main.java create mode 100644 test/037-inherit/expected.txt create mode 100644 test/037-inherit/info.txt create mode 100644 test/037-inherit/src/Main.java create mode 100644 test/038-inner-null/expected.txt create mode 100644 test/038-inner-null/info.txt create mode 100755 test/038-inner-null/run create mode 100644 test/038-inner-null/src/Main.java create mode 100644 
test/039-join-main/expected.txt create mode 100644 test/039-join-main/info.txt create mode 100644 test/039-join-main/src/Main.java create mode 100644 test/040-miranda/expected.txt create mode 100644 test/040-miranda/info.txt create mode 100644 test/040-miranda/src/Main.java create mode 100644 test/040-miranda/src/MirandaAbstract.java create mode 100644 test/040-miranda/src/MirandaClass.java create mode 100644 test/040-miranda/src/MirandaClass2.java create mode 100644 test/040-miranda/src/MirandaInterface.java create mode 100644 test/040-miranda/src/MirandaInterface2.java create mode 100644 test/041-narrowing/expected.txt create mode 100644 test/041-narrowing/info.txt create mode 100644 test/041-narrowing/src/Main.java create mode 100644 test/042-new-instance/expected.txt create mode 100644 test/042-new-instance/info.txt create mode 100644 test/042-new-instance/src/Main.java create mode 100644 test/042-new-instance/src/MaybeAbstract.java create mode 100644 test/042-new-instance/src/otherpackage/ConstructorAccess.java create mode 100644 test/042-new-instance/src/otherpackage/PackageAccess.java create mode 100644 test/042-new-instance/src2/MaybeAbstract.java create mode 100644 test/043-privates/expected.txt create mode 100644 test/043-privates/info.txt create mode 100644 test/043-privates/src/Main.java create mode 100644 test/044-proxy/expected.txt create mode 100644 test/044-proxy/info.txt create mode 100644 test/044-proxy/native_proxy.cc create mode 100644 test/044-proxy/run create mode 100644 test/044-proxy/src/BasicTest.java create mode 100644 test/044-proxy/src/Clash.java create mode 100644 test/044-proxy/src/Clash2.java create mode 100644 test/044-proxy/src/Clash3.java create mode 100644 test/044-proxy/src/Clash4.java create mode 100644 test/044-proxy/src/ConstructorProxy.java create mode 100644 test/044-proxy/src/FloatSelect.java create mode 100644 test/044-proxy/src/Main.java create mode 100644 test/044-proxy/src/MethodComparator.java create mode 100644 
test/044-proxy/src/NarrowingTest.java create mode 100644 test/044-proxy/src/NativeProxy.java create mode 100644 test/044-proxy/src/OOMEOnDispatch.java create mode 100644 test/044-proxy/src/ReturnsAndArgPassing.java create mode 100644 test/044-proxy/src/WrappedThrow.java create mode 100644 test/045-reflect-array/expected.txt create mode 100644 test/045-reflect-array/info.txt create mode 100644 test/045-reflect-array/src/Main.java create mode 100644 test/046-reflect/expected.txt create mode 100644 test/046-reflect/info.txt create mode 100644 test/046-reflect/src/Main.java create mode 100644 test/046-reflect/src/otherpackage/Other.java create mode 100644 test/047-returns/expected.txt create mode 100644 test/047-returns/info.txt create mode 100644 test/047-returns/src/Main.java create mode 100644 test/048-reflect-v8/expected.txt create mode 100644 test/048-reflect-v8/info.txt create mode 100644 test/048-reflect-v8/src/AnnotationTest.java create mode 100644 test/048-reflect-v8/src/AnnotationTestFixture.java create mode 100644 test/048-reflect-v8/src/AnnotationTestHelpers.java create mode 100644 test/048-reflect-v8/src/Calendar.java create mode 100644 test/048-reflect-v8/src/Calendars.java create mode 100644 test/048-reflect-v8/src/DefaultDeclared.java create mode 100644 test/048-reflect-v8/src/IFaceA.java create mode 100644 test/048-reflect-v8/src/IFaceSimple.java create mode 100644 test/048-reflect-v8/src/IsDefaultTest.java create mode 100644 test/048-reflect-v8/src/Main.java create mode 100644 test/048-reflect-v8/src/SingleUser.java create mode 100644 test/048-reflect-v8/src/User.java create mode 100644 test/048-reflect-v8/src/User2.java create mode 100644 test/048-reflect-v8/src/UserComplex.java create mode 100644 test/048-reflect-v8/src/UserSub.java create mode 100644 test/048-reflect-v8/src/UserSub2.java create mode 100644 test/049-show-object/expected.txt create mode 100644 test/049-show-object/info.txt create mode 100644 test/049-show-object/src/Main.java create 
mode 100644 test/050-sync-test/expected.txt create mode 100644 test/050-sync-test/info.txt create mode 100644 test/050-sync-test/src/Main.java create mode 100644 test/050-sync-test/src/ThreadDeathHandler.java create mode 100644 test/051-thread/expected.txt create mode 100644 test/051-thread/info.txt create mode 100644 test/051-thread/src/Main.java create mode 100644 test/051-thread/thread_test.cc create mode 100644 test/052-verifier-fun/expected.txt create mode 100644 test/052-verifier-fun/info.txt create mode 100644 test/052-verifier-fun/src/Blah.java create mode 100644 test/052-verifier-fun/src/BlahFeature.java create mode 100644 test/052-verifier-fun/src/BlahOne.java create mode 100644 test/052-verifier-fun/src/BlahTwo.java create mode 100644 test/052-verifier-fun/src/Main.java create mode 100644 test/053-wait-some/expected.txt create mode 100644 test/053-wait-some/info.txt create mode 100644 test/053-wait-some/src/Main.java create mode 100644 test/054-uncaught/expected.txt create mode 100644 test/054-uncaught/info.txt create mode 100755 test/054-uncaught/run create mode 100644 test/054-uncaught/src/Main.java create mode 100644 test/054-uncaught/src/ThreadDeathHandler.java create mode 100644 test/055-enum-performance/expected.txt create mode 100644 test/055-enum-performance/info.txt create mode 100755 test/055-enum-performance/run create mode 100644 test/055-enum-performance/src/Main.java create mode 100644 test/055-enum-performance/src/SamePackagePrivateEnum.java create mode 100644 test/055-enum-performance/src/SamePackagePublicEnum.java create mode 100644 test/055-enum-performance/src/otherpackage/OtherPackagePublicEnum.java create mode 100644 test/056-const-string-jumbo/build create mode 100644 test/056-const-string-jumbo/expected.txt create mode 100644 test/056-const-string-jumbo/info.txt create mode 100644 test/056-const-string-jumbo/src/Main.java create mode 100644 test/058-enum-order/expected.txt create mode 100644 test/058-enum-order/info.txt create mode 
100644 test/058-enum-order/src/Main.java create mode 100644 test/059-finalizer-throw/expected.txt create mode 100644 test/059-finalizer-throw/info.txt create mode 100644 test/059-finalizer-throw/run create mode 100644 test/059-finalizer-throw/src/Main.java create mode 100644 test/061-out-of-memory/expected.txt create mode 100644 test/061-out-of-memory/info.txt create mode 100644 test/061-out-of-memory/src/Main.java create mode 100644 test/062-character-encodings/expected.txt create mode 100644 test/062-character-encodings/info.txt create mode 100644 test/062-character-encodings/src/Main.java create mode 100644 test/063-process-manager/expected.txt create mode 100644 test/063-process-manager/info.txt create mode 100644 test/063-process-manager/src/Main.java create mode 100644 test/064-field-access/expected.txt create mode 100644 test/064-field-access/info.txt create mode 100644 test/064-field-access/jasmin/SubClassUsingInaccessibleField.j create mode 100644 test/064-field-access/run create mode 100644 test/064-field-access/src/GetNonexistent.java create mode 100644 test/064-field-access/src/Holder.java create mode 100644 test/064-field-access/src/Main.java create mode 100644 test/064-field-access/src/OOMEOnNullAccess.java create mode 100644 test/064-field-access/src/other/ProtectedClass.java create mode 100644 test/064-field-access/src/other/PublicClass.java create mode 100644 test/064-field-access/src2/Holder.java create mode 100755 test/065-mismatched-implements/build create mode 100644 test/065-mismatched-implements/expected.txt create mode 100644 test/065-mismatched-implements/info.txt create mode 100644 test/065-mismatched-implements/src/Base.java create mode 100644 test/065-mismatched-implements/src/Defs.java create mode 100644 test/065-mismatched-implements/src/Indirect.java create mode 100644 test/065-mismatched-implements/src/Main.java create mode 100644 test/065-mismatched-implements/src2/Defs.java create mode 100644 test/066-mismatched-super/build create 
mode 100644 test/066-mismatched-super/expected.txt create mode 100644 test/066-mismatched-super/info.txt create mode 100644 test/066-mismatched-super/src/Base.java create mode 100644 test/066-mismatched-super/src/Defs.java create mode 100644 test/066-mismatched-super/src/ExtendsFinal.java create mode 100644 test/066-mismatched-super/src/Final.java create mode 100644 test/066-mismatched-super/src/Main.java create mode 100644 test/066-mismatched-super/src2/Defs.java create mode 100644 test/066-mismatched-super/src2/Final.java create mode 100644 test/067-preemptive-unpark/expected.txt create mode 100644 test/067-preemptive-unpark/info.txt create mode 100644 test/067-preemptive-unpark/src/Main.java create mode 100644 test/068-classloader/expected.txt create mode 100644 test/068-classloader/info.txt create mode 100644 test/068-classloader/src-ex/AbstractGet.java create mode 100644 test/068-classloader/src-ex/DoubledExtend.java create mode 100644 test/068-classloader/src-ex/DoubledExtendOkay.java create mode 100644 test/068-classloader/src-ex/DoubledImplement.java create mode 100644 test/068-classloader/src-ex/DoubledImplement2.java create mode 100644 test/068-classloader/src-ex/GetDoubled.java create mode 100644 test/068-classloader/src-ex/IfaceImpl.java create mode 100644 test/068-classloader/src-ex/IfaceSub.java create mode 100644 test/068-classloader/src-ex/Inaccessible1.java create mode 100644 test/068-classloader/src-ex/Inaccessible2.java create mode 100644 test/068-classloader/src-ex/Inaccessible3.java create mode 100644 test/068-classloader/src-ex/MutationTarget.java create mode 100644 test/068-classloader/src-ex/Mutator.java create mode 100644 test/068-classloader/src/Base.java create mode 100644 test/068-classloader/src/BaseOkay.java create mode 100644 test/068-classloader/src/DoubledExtend.java create mode 100644 test/068-classloader/src/DoubledExtendOkay.java create mode 100644 test/068-classloader/src/DoubledImplement.java create mode 100644 
test/068-classloader/src/DoubledImplement2.java create mode 100644 test/068-classloader/src/FancyLoader.java create mode 100644 test/068-classloader/src/ICommon.java create mode 100644 test/068-classloader/src/ICommon2.java create mode 100644 test/068-classloader/src/IGetDoubled.java create mode 100644 test/068-classloader/src/IfaceSuper.java create mode 100644 test/068-classloader/src/InaccessibleBase.java create mode 100644 test/068-classloader/src/InaccessibleInterface.java create mode 100644 test/068-classloader/src/Main.java create mode 100644 test/068-classloader/src/SimpleBase.java create mode 100644 test/068-classloader/src/Useless.java create mode 100644 test/069-field-type/expected.txt create mode 100644 test/069-field-type/info.txt create mode 100644 test/069-field-type/src/Blah.java create mode 100644 test/069-field-type/src/Holder.java create mode 100644 test/069-field-type/src/Main.java create mode 100644 test/069-field-type/src2/Blah.java create mode 100644 test/070-nio-buffer/expected.txt create mode 100644 test/070-nio-buffer/info.txt create mode 100644 test/070-nio-buffer/src/Main.java create mode 100755 test/071-dexfile-get-static-size/build create mode 100644 test/071-dexfile-get-static-size/expected.txt create mode 100644 test/071-dexfile-get-static-size/info.txt create mode 100644 test/071-dexfile-get-static-size/res/test1.dex create mode 100644 test/071-dexfile-get-static-size/res/test2.dex create mode 100644 test/071-dexfile-get-static-size/src/Main.java create mode 100755 test/071-dexfile-map-clean/build create mode 100644 test/071-dexfile-map-clean/expected.txt create mode 100644 test/071-dexfile-map-clean/info.txt create mode 100755 test/071-dexfile-map-clean/run create mode 100644 test/071-dexfile-map-clean/src-ex/Another.java create mode 100644 test/071-dexfile-map-clean/src/Main.java create mode 100644 test/071-dexfile/expected.txt create mode 100644 test/071-dexfile/info.txt create mode 100644 test/071-dexfile/src-ex/Another.java 
create mode 100644 test/071-dexfile/src/Main.java create mode 100644 test/072-precise-gc/expected.txt create mode 100644 test/072-precise-gc/info.txt create mode 100644 test/072-precise-gc/src/Main.java create mode 100644 test/072-reachability-fence/expected.txt create mode 100644 test/072-reachability-fence/info.txt create mode 100644 test/072-reachability-fence/src/Main.java create mode 100644 test/073-mismatched-field/expected.txt create mode 100644 test/073-mismatched-field/info.txt create mode 100644 test/073-mismatched-field/src/IMain.java create mode 100644 test/073-mismatched-field/src/Main.java create mode 100644 test/073-mismatched-field/src/SuperMain.java create mode 100644 test/073-mismatched-field/src2/IMain.java create mode 100644 test/074-gc-thrash/expected.txt create mode 100644 test/074-gc-thrash/info.txt create mode 100644 test/074-gc-thrash/src/Main.java create mode 100644 test/075-verification-error/expected.txt create mode 100644 test/075-verification-error/info.txt create mode 100644 test/075-verification-error/src/BadIfaceImpl.java create mode 100644 test/075-verification-error/src/BadInterface.java create mode 100644 test/075-verification-error/src/Main.java create mode 100644 test/075-verification-error/src/MaybeAbstract.java create mode 100644 test/075-verification-error/src/other/InaccessibleClass.java create mode 100644 test/075-verification-error/src/other/InaccessibleMethod.java create mode 100644 test/075-verification-error/src/other/Mutant.java create mode 100644 test/075-verification-error/src2/BadInterface.java create mode 100644 test/075-verification-error/src2/MaybeAbstract.java create mode 100644 test/075-verification-error/src2/other/InaccessibleClass.java create mode 100644 test/075-verification-error/src2/other/InaccessibleMethod.java create mode 100644 test/075-verification-error/src2/other/Mutant.java create mode 100644 test/076-boolean-put/expected.txt create mode 100644 test/076-boolean-put/info.txt create mode 100644 
test/076-boolean-put/src/Main.java create mode 100644 test/077-method-override/expected.txt create mode 100644 test/077-method-override/info.txt create mode 100644 test/077-method-override/src/Base.java create mode 100644 test/077-method-override/src/Derived.java create mode 100644 test/077-method-override/src/Main.java create mode 100644 test/077-method-override/src2/Base.java create mode 100644 test/078-polymorphic-virtual/expected.txt create mode 100644 test/078-polymorphic-virtual/info.txt create mode 100644 test/078-polymorphic-virtual/src/Base.java create mode 100644 test/078-polymorphic-virtual/src/Derived1.java create mode 100644 test/078-polymorphic-virtual/src/Derived2.java create mode 100644 test/078-polymorphic-virtual/src/Derived3.java create mode 100644 test/078-polymorphic-virtual/src/Main.java create mode 100644 test/079-phantom/expected.txt create mode 100644 test/079-phantom/info.txt create mode 100644 test/079-phantom/src/Bitmap.java create mode 100644 test/079-phantom/src/Main.java create mode 100644 test/080-oom-fragmentation/expected.txt create mode 100644 test/080-oom-fragmentation/info.txt create mode 100644 test/080-oom-fragmentation/src/Main.java create mode 100644 test/080-oom-throw-with-finalizer/expected.txt create mode 100644 test/080-oom-throw-with-finalizer/info.txt create mode 100644 test/080-oom-throw-with-finalizer/src/Main.java create mode 100644 test/080-oom-throw/expected.txt create mode 100644 test/080-oom-throw/info.txt create mode 100644 test/080-oom-throw/run create mode 100644 test/080-oom-throw/src/Main.java create mode 100644 test/081-hot-exceptions/expected.txt create mode 100644 test/081-hot-exceptions/info.txt create mode 100644 test/081-hot-exceptions/src/Main.java create mode 100644 test/082-inline-execute/expected.txt create mode 100644 test/082-inline-execute/info.txt create mode 100644 test/082-inline-execute/src/Main.java create mode 100644 test/082-inline-execute/src/junit/framework/Assert.java create mode 
100644 test/082-inline-execute/src/junit/framework/AssertionFailedError.java create mode 100644 test/082-inline-execute/src/junit/framework/ComparisonCompactor.java create mode 100644 test/082-inline-execute/src/junit/framework/ComparisonFailure.java create mode 100644 test/083-compiler-regressions/expected.txt create mode 100644 test/083-compiler-regressions/info.txt create mode 100644 test/083-compiler-regressions/src/Main.java create mode 100644 test/083-compiler-regressions/src/ZeroTests.java create mode 100644 test/084-class-init/expected.txt create mode 100644 test/084-class-init/info.txt create mode 100644 test/084-class-init/src/Exploder.java create mode 100644 test/084-class-init/src/IntHolder.java create mode 100644 test/084-class-init/src/Main.java create mode 100644 test/084-class-init/src/PartialInit.java create mode 100644 test/084-class-init/src/SlowInit.java create mode 100644 test/085-old-style-inner-class/expected.txt create mode 100644 test/085-old-style-inner-class/info.txt create mode 100644 test/085-old-style-inner-class/jasmin/Main$1.j create mode 100644 test/085-old-style-inner-class/jasmin/Main$2.j create mode 100644 test/085-old-style-inner-class/src/Main.java create mode 100644 test/086-null-super/expected.txt create mode 100644 test/086-null-super/info.txt create mode 100644 test/086-null-super/src/Main.java create mode 100644 test/087-gc-after-link/expected.txt create mode 100644 test/087-gc-after-link/info.txt create mode 100644 test/087-gc-after-link/src/Main.java create mode 100644 test/088-monitor-verification/expected.txt create mode 100644 test/088-monitor-verification/info.txt create mode 100644 test/088-monitor-verification/smali/NotStructuredOverUnlock.smali create mode 100644 test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali create mode 100644 test/088-monitor-verification/smali/NullLocks.smali create mode 100644 test/088-monitor-verification/smali/OK.smali create mode 100644 
test/088-monitor-verification/smali/TooDeep.smali create mode 100644 test/088-monitor-verification/smali/UnbalancedJoin.smali create mode 100644 test/088-monitor-verification/smali/UnbalancedStraight.smali create mode 100644 test/088-monitor-verification/src/Main.java create mode 100644 test/088-monitor-verification/src/MyException.java create mode 100644 test/088-monitor-verification/src/TwoPath.java create mode 100644 test/089-many-methods/build create mode 100755 test/089-many-methods/check create mode 100644 test/089-many-methods/expected.txt create mode 100644 test/089-many-methods/info.txt create mode 100644 test/090-loop-formation/expected.txt create mode 100644 test/090-loop-formation/info.txt create mode 100644 test/090-loop-formation/src/Main.java create mode 100755 test/091-override-package-private-method/build create mode 100644 test/091-override-package-private-method/expected.txt create mode 100644 test/091-override-package-private-method/info.txt create mode 100755 test/091-override-package-private-method/run create mode 100644 test/091-override-package-private-method/src/Main.java create mode 100644 test/091-override-package-private-method/src/OverridePackagePrivateMethodSuper.java create mode 100644 test/091-override-package-private-method/src/OverridePackagePrivateMethodTest.java create mode 100644 test/092-locale/expected.txt create mode 100644 test/092-locale/info.txt create mode 100644 test/092-locale/src/Main.java create mode 100644 test/093-serialization/expected.txt create mode 100644 test/093-serialization/info.txt create mode 100644 test/093-serialization/src/Main.java create mode 100644 test/094-pattern/expected.txt create mode 100644 test/094-pattern/info.txt create mode 100644 test/094-pattern/src/Main.java create mode 100644 test/095-switch-MAX_INT/expected.txt create mode 100644 test/095-switch-MAX_INT/info.txt create mode 100644 test/095-switch-MAX_INT/src/Main.java create mode 100644 test/096-array-copy-concurrent-gc/expected.txt 
create mode 100644 test/096-array-copy-concurrent-gc/info.txt create mode 100644 test/096-array-copy-concurrent-gc/src/Main.java create mode 100644 test/097-duplicate-method/classes.dex create mode 100644 test/097-duplicate-method/expected.txt create mode 100644 test/097-duplicate-method/info.txt create mode 100644 test/098-ddmc/expected.txt create mode 100644 test/098-ddmc/info.txt create mode 100644 test/098-ddmc/src/Main.java create mode 100755 test/099-vmdebug/check create mode 100644 test/099-vmdebug/expected.txt create mode 100644 test/099-vmdebug/info.txt create mode 100644 test/099-vmdebug/src/Main.java create mode 100644 test/100-reflect2/expected.txt create mode 100644 test/100-reflect2/info.txt create mode 100644 test/100-reflect2/src/Main.java create mode 100644 test/100-reflect2/src/sub/PPClass.java create mode 100644 test/1000-non-moving-space-stress/expected.txt create mode 100644 test/1000-non-moving-space-stress/info.txt create mode 100644 test/1000-non-moving-space-stress/src-art/Main.java create mode 100644 test/1001-app-image-regions/app_image_regions.cc create mode 100755 test/1001-app-image-regions/build create mode 100644 test/1001-app-image-regions/expected.txt create mode 100644 test/1001-app-image-regions/info.txt create mode 100644 test/1001-app-image-regions/run create mode 100644 test/1001-app-image-regions/src/Main.java create mode 100644 test/1002-notify-startup/check create mode 100644 test/1002-notify-startup/expected.txt create mode 100644 test/1002-notify-startup/info.txt create mode 100644 test/1002-notify-startup/src-art/Main.java create mode 100644 test/1002-notify-startup/startup_interface.cc create mode 100755 test/1003-metadata-section-strings/build create mode 100644 test/1003-metadata-section-strings/expected.txt create mode 100644 test/1003-metadata-section-strings/info.txt create mode 100644 test/1003-metadata-section-strings/profile create mode 100644 test/1003-metadata-section-strings/run create mode 100644 
test/1003-metadata-section-strings/src-art/Main.java create mode 100644 test/1004-checker-volatile-ref-load/expected.txt create mode 100644 test/1004-checker-volatile-ref-load/info.txt create mode 100644 test/1004-checker-volatile-ref-load/run create mode 100644 test/1004-checker-volatile-ref-load/src/Main.java create mode 100644 test/101-fibonacci/expected.txt create mode 100644 test/101-fibonacci/info.txt create mode 100644 test/101-fibonacci/src/Main.java create mode 100644 test/102-concurrent-gc/expected.txt create mode 100644 test/102-concurrent-gc/info.txt create mode 100644 test/102-concurrent-gc/src/Main.java create mode 100644 test/103-string-append/expected.txt create mode 100644 test/103-string-append/info.txt create mode 100644 test/103-string-append/src/Main.java create mode 100644 test/104-growth-limit/expected.txt create mode 100644 test/104-growth-limit/info.txt create mode 100644 test/104-growth-limit/src/Main.java create mode 100644 test/105-invoke/expected.txt create mode 100644 test/105-invoke/info.txt create mode 100644 test/105-invoke/src/Main.java create mode 100644 test/106-exceptions2/expected.txt create mode 100644 test/106-exceptions2/info.txt create mode 100644 test/106-exceptions2/src/Main.java create mode 100644 test/107-int-math2/expected.txt create mode 100644 test/107-int-math2/info.txt create mode 100644 test/107-int-math2/src/Main.java create mode 100644 test/108-check-cast/expected.txt create mode 100644 test/108-check-cast/info.txt create mode 100644 test/108-check-cast/src/Main.java create mode 100644 test/109-suspend-check/expected.txt create mode 100644 test/109-suspend-check/info.txt create mode 100644 test/109-suspend-check/src/Main.java create mode 100644 test/110-field-access/expected.txt create mode 100644 test/110-field-access/info.txt create mode 100644 test/110-field-access/src/Main.java create mode 100644 test/111-unresolvable-exception/build create mode 100644 test/111-unresolvable-exception/expected.txt create mode 
100644 test/111-unresolvable-exception/info.txt create mode 100644 test/111-unresolvable-exception/src/Main.java create mode 100644 test/111-unresolvable-exception/src/TestException.java create mode 100644 test/112-double-math/expected.txt create mode 100644 test/112-double-math/info.txt create mode 100644 test/112-double-math/src/Main.java create mode 100644 test/113-multidex/expected.txt create mode 100644 test/113-multidex/info.txt create mode 100644 test/113-multidex/src-multidex/Main.java create mode 100644 test/113-multidex/src/FillerA.java create mode 100644 test/113-multidex/src/FillerB.java create mode 100644 test/113-multidex/src/Inf1.java create mode 100644 test/113-multidex/src/Inf2.java create mode 100644 test/113-multidex/src/Inf3.java create mode 100644 test/113-multidex/src/Inf4.java create mode 100644 test/113-multidex/src/Inf5.java create mode 100644 test/113-multidex/src/Inf6.java create mode 100644 test/113-multidex/src/Inf7.java create mode 100644 test/113-multidex/src/Inf8.java create mode 100644 test/113-multidex/src/Second.java create mode 100644 test/114-ParallelGC/expected.txt create mode 100644 test/114-ParallelGC/info.txt create mode 100644 test/114-ParallelGC/src/Main.java create mode 100755 test/115-native-bridge/check create mode 100644 test/115-native-bridge/expected.txt create mode 100644 test/115-native-bridge/info.txt create mode 100644 test/115-native-bridge/nativebridge.cc create mode 100644 test/115-native-bridge/run create mode 100644 test/115-native-bridge/src/NativeBridgeMain.java create mode 100644 test/116-nodex2oat/expected.txt create mode 100644 test/116-nodex2oat/info.txt create mode 100755 test/116-nodex2oat/run create mode 100644 test/116-nodex2oat/src/Main.java create mode 100755 test/118-noimage-dex2oat/check create mode 100644 test/118-noimage-dex2oat/expected.txt create mode 100644 test/118-noimage-dex2oat/info.txt create mode 100644 test/118-noimage-dex2oat/run create mode 100644 
test/118-noimage-dex2oat/smali/b_18485243.smali create mode 100644 test/118-noimage-dex2oat/src/Main.java create mode 100644 test/120-hashcode/expected.txt create mode 100644 test/120-hashcode/info.txt create mode 100644 test/120-hashcode/src/Main.java create mode 100644 test/121-modifiers/expected.txt create mode 100644 test/121-modifiers/info.txt create mode 100644 test/121-modifiers/smali/A$B.smali create mode 100644 test/121-modifiers/smali/A$C.smali create mode 100644 test/121-modifiers/smali/A.smali create mode 100644 test/121-modifiers/smali/Inf.smali create mode 100644 test/121-modifiers/smali/NonInf.smali create mode 100644 test/121-modifiers/src-java/A.java create mode 100644 test/121-modifiers/src-java/Asm.java create mode 100644 test/121-modifiers/src-java/Inf.java create mode 100644 test/121-modifiers/src-java/NonInf.java create mode 100644 test/121-modifiers/src2/Main.java create mode 100644 test/121-simple-suspend-check/expected.txt create mode 100644 test/121-simple-suspend-check/info.txt create mode 100644 test/121-simple-suspend-check/src/Main.java create mode 100644 test/122-npe/expected.txt create mode 100644 test/122-npe/info.txt create mode 100644 test/122-npe/src/Main.java create mode 100644 test/123-compiler-regressions-mt/expected.txt create mode 100644 test/123-compiler-regressions-mt/info.txt create mode 100644 test/123-compiler-regressions-mt/src/Main.java create mode 100644 test/123-inline-execute2/expected.txt create mode 100644 test/123-inline-execute2/info.txt create mode 100644 test/123-inline-execute2/src/Main.java create mode 100644 test/124-missing-classes/build create mode 100644 test/124-missing-classes/expected.txt create mode 100644 test/124-missing-classes/info.txt create mode 100644 test/124-missing-classes/src/Main.java create mode 100644 test/124-missing-classes/src/MissingClass.java create mode 100644 test/125-gc-and-classloading/expected.txt create mode 100644 test/125-gc-and-classloading/info.txt create mode 100644 
test/125-gc-and-classloading/src/Main.java create mode 100644 test/126-miranda-multidex/build create mode 100644 test/126-miranda-multidex/expected.txt create mode 100644 test/126-miranda-multidex/info.txt create mode 100755 test/126-miranda-multidex/run create mode 100644 test/126-miranda-multidex/src/Main.java create mode 100644 test/126-miranda-multidex/src/MirandaAbstract.java create mode 100644 test/126-miranda-multidex/src/MirandaClass.java create mode 100644 test/126-miranda-multidex/src/MirandaClass2.java create mode 100644 test/126-miranda-multidex/src/MirandaInterface.java create mode 100644 test/126-miranda-multidex/src/MirandaInterface2.java create mode 100755 test/127-checker-secondarydex/build create mode 100644 test/127-checker-secondarydex/expected.txt create mode 100644 test/127-checker-secondarydex/info.txt create mode 100755 test/127-checker-secondarydex/run create mode 100644 test/127-checker-secondarydex/src/Main.java create mode 100644 test/127-checker-secondarydex/src/Super.java create mode 100644 test/127-checker-secondarydex/src/Test.java create mode 100644 test/128-reg-spill-on-implicit-nullcheck/expected.txt create mode 100644 test/128-reg-spill-on-implicit-nullcheck/info.txt create mode 100644 test/128-reg-spill-on-implicit-nullcheck/src/Main.java create mode 100644 test/129-ThreadGetId/expected.txt create mode 100644 test/129-ThreadGetId/info.txt create mode 100644 test/129-ThreadGetId/src/Main.java create mode 100644 test/130-hprof/expected.txt create mode 100644 test/130-hprof/info.txt create mode 100644 test/130-hprof/src-ex/Allocator.java create mode 100644 test/130-hprof/src/Main.java create mode 100644 test/132-daemon-locks-shutdown/expected.txt create mode 100644 test/132-daemon-locks-shutdown/info.txt create mode 100644 test/132-daemon-locks-shutdown/src/Main.java create mode 100644 test/133-static-invoke-super/expected.txt create mode 100644 test/133-static-invoke-super/info.txt create mode 100755 
test/133-static-invoke-super/run create mode 100644 test/133-static-invoke-super/src/Main.java create mode 100644 test/1336-short-finalizer-timeout/expected.txt create mode 100644 test/1336-short-finalizer-timeout/info.txt create mode 100755 test/1336-short-finalizer-timeout/run create mode 100644 test/1336-short-finalizer-timeout/src/Main.java create mode 100755 test/1337-gc-coverage/check create mode 100644 test/1337-gc-coverage/expected.txt create mode 100644 test/1337-gc-coverage/gc_coverage.cc create mode 100644 test/1337-gc-coverage/info.txt create mode 100644 test/1337-gc-coverage/src/Main.java create mode 100644 test/1338-gc-no-los/expected.txt create mode 100644 test/1338-gc-no-los/info.txt create mode 100755 test/1338-gc-no-los/run create mode 100644 test/1338-gc-no-los/src-art/Main.java create mode 100644 test/1339-dead-reference-safe/check create mode 100644 test/1339-dead-reference-safe/expected.txt create mode 100644 test/1339-dead-reference-safe/info.txt create mode 100644 test/1339-dead-reference-safe/src/DeadReferenceSafeTest.java create mode 100644 test/1339-dead-reference-safe/src/DeadReferenceUnsafeTest.java create mode 100644 test/1339-dead-reference-safe/src/Main.java create mode 100644 test/1339-dead-reference-safe/src/ReachabilityFenceTest.java create mode 100644 test/1339-dead-reference-safe/src/ReachabilitySensitiveFunTest.java create mode 100644 test/1339-dead-reference-safe/src/ReachabilitySensitiveTest.java create mode 100755 test/134-nodex2oat-nofallback/check create mode 100644 test/134-nodex2oat-nofallback/expected.txt create mode 100644 test/134-nodex2oat-nofallback/info.txt create mode 100755 test/134-nodex2oat-nofallback/run create mode 100644 test/134-nodex2oat-nofallback/src/Main.java create mode 100644 test/134-reg-promotion/expected.txt create mode 100644 test/134-reg-promotion/info.txt create mode 100644 test/134-reg-promotion/smali/Test.smali create mode 100644 test/134-reg-promotion/src/Main.java create mode 100644 
test/135-MirandaDispatch/expected.txt create mode 100644 test/135-MirandaDispatch/info.txt create mode 100644 test/135-MirandaDispatch/smali/b_21646347.smali create mode 100644 test/135-MirandaDispatch/src/Main.java create mode 100644 test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc create mode 100644 test/136-daemon-jni-shutdown/expected.txt create mode 100644 test/136-daemon-jni-shutdown/info.txt create mode 100644 test/136-daemon-jni-shutdown/src/Main.java create mode 100644 test/137-cfi/cfi.cc create mode 100644 test/137-cfi/expected.txt create mode 100644 test/137-cfi/info.txt create mode 100755 test/137-cfi/run create mode 100644 test/137-cfi/src-multidex/Base.java create mode 100644 test/137-cfi/src/Main.java create mode 100644 test/138-duplicate-classes-check/expected.txt create mode 100644 test/138-duplicate-classes-check/info.txt create mode 100644 test/138-duplicate-classes-check/src-art/A.java create mode 100644 test/138-duplicate-classes-check/src-art/Main.java create mode 100644 test/138-duplicate-classes-check/src-ex/A.java create mode 100644 test/138-duplicate-classes-check/src-ex/TestEx.java create mode 100755 test/138-duplicate-classes-check2/build create mode 100644 test/138-duplicate-classes-check2/expected.txt create mode 100644 test/138-duplicate-classes-check2/info.txt create mode 100644 test/138-duplicate-classes-check2/src-ex/A.java create mode 100644 test/138-duplicate-classes-check2/src-ex/TestEx.java create mode 100644 test/138-duplicate-classes-check2/src/A.java create mode 100644 test/138-duplicate-classes-check2/src/Main.java create mode 100755 test/139-register-natives/check create mode 100644 test/139-register-natives/expected.txt create mode 100644 test/139-register-natives/info.txt create mode 100644 test/139-register-natives/regnative.cc create mode 100644 test/139-register-natives/src/Main.java create mode 100644 test/140-dce-regression/expected.txt create mode 100644 test/140-dce-regression/info.txt create mode 100644 
test/140-dce-regression/src/Main.java create mode 100644 test/140-field-packing/expected.txt create mode 100644 test/140-field-packing/info.txt create mode 100644 test/140-field-packing/src/GapOrder.java create mode 100644 test/140-field-packing/src/GapOrderBase.java create mode 100644 test/140-field-packing/src/Main.java create mode 100644 test/141-class-unload/expected.txt create mode 100644 test/141-class-unload/info.txt create mode 100644 test/141-class-unload/jni_unload.cc create mode 100644 test/141-class-unload/src-ex/IntHolder.java create mode 100644 test/141-class-unload/src/Main.java create mode 100644 test/142-classloader2/expected.txt create mode 100644 test/142-classloader2/info.txt create mode 100644 test/142-classloader2/smali/B.smali create mode 100644 test/142-classloader2/smali/MyPathClassLoader.smali create mode 100644 test/142-classloader2/src-ex/A.java create mode 100644 test/142-classloader2/src/A.java create mode 100644 test/142-classloader2/src/Main.java create mode 100755 test/143-string-value/check create mode 100644 test/143-string-value/expected.txt create mode 100644 test/143-string-value/info.txt create mode 100644 test/143-string-value/src/Main.java create mode 100644 test/144-static-field-sigquit/expected.txt create mode 100644 test/144-static-field-sigquit/info.txt create mode 100644 test/144-static-field-sigquit/src/ClassWithStaticField.java create mode 100644 test/144-static-field-sigquit/src/Main.java create mode 100644 test/144-static-field-sigquit/src/SigQuit.java create mode 100644 test/144-static-field-sigquit/src/SynchronizedUse.java create mode 100644 test/145-alloc-tracking-stress/expected.txt create mode 100644 test/145-alloc-tracking-stress/info.txt create mode 100644 test/145-alloc-tracking-stress/src-art/Main.java create mode 100644 test/146-bad-interface/check create mode 100644 test/146-bad-interface/expected.txt create mode 100644 test/146-bad-interface/info.txt create mode 100755 test/146-bad-interface/run create 
mode 100644 test/146-bad-interface/smali/invoke_inf.smali create mode 100644 test/146-bad-interface/src-art/Main.java create mode 100644 test/146-bad-interface/src-ex/A.java create mode 100644 test/146-bad-interface/src-ex/Iface.java create mode 100644 test/147-stripped-dex-fallback/expected.txt create mode 100644 test/147-stripped-dex-fallback/info.txt create mode 100755 test/147-stripped-dex-fallback/run create mode 100644 test/147-stripped-dex-fallback/src/Main.java create mode 100755 test/148-multithread-gc-annotations/check create mode 100644 test/148-multithread-gc-annotations/expected.txt create mode 100644 test/148-multithread-gc-annotations/gc_coverage.cc create mode 100644 test/148-multithread-gc-annotations/info.txt create mode 100644 test/148-multithread-gc-annotations/src/AnnoClass1.java create mode 100644 test/148-multithread-gc-annotations/src/AnnoClass2.java create mode 100644 test/148-multithread-gc-annotations/src/AnnoClass3.java create mode 100644 test/148-multithread-gc-annotations/src/AnnotationThread.java create mode 100644 test/148-multithread-gc-annotations/src/Main.java create mode 100644 test/148-multithread-gc-annotations/src/MovingGCThread.java create mode 100755 test/149-suspend-all-stress/check create mode 100644 test/149-suspend-all-stress/expected.txt create mode 100644 test/149-suspend-all-stress/info.txt create mode 100644 test/149-suspend-all-stress/src/Main.java create mode 100644 test/149-suspend-all-stress/suspend_all.cc create mode 100644 test/150-loadlibrary/expected.txt create mode 100644 test/150-loadlibrary/info.txt create mode 100644 test/150-loadlibrary/src/Main.java create mode 100644 test/151-OpenFileLimit/expected.txt create mode 100644 test/151-OpenFileLimit/info.txt create mode 100755 test/151-OpenFileLimit/run create mode 100644 test/151-OpenFileLimit/src/Main.java create mode 100644 test/152-dead-large-object/expected.txt create mode 100644 test/152-dead-large-object/info.txt create mode 100644 
test/152-dead-large-object/src/Main.java create mode 100644 test/153-reference-stress/expected.txt create mode 100644 test/153-reference-stress/info.txt create mode 100644 test/153-reference-stress/src/Main.java create mode 100644 test/154-gc-loop/expected.txt create mode 100644 test/154-gc-loop/heap_interface.cc create mode 100644 test/154-gc-loop/info.txt create mode 100644 test/154-gc-loop/src/Main.java create mode 100644 test/155-java-set-resolved-type/expected.txt create mode 100644 test/155-java-set-resolved-type/info.txt create mode 100644 test/155-java-set-resolved-type/src-ex/TestInterface.java create mode 100644 test/155-java-set-resolved-type/src/Main.java create mode 100644 test/155-java-set-resolved-type/src/TestImplementation.java create mode 100644 test/155-java-set-resolved-type/src/TestInterface.java create mode 100644 test/155-java-set-resolved-type/src/TestParameter.java create mode 100644 test/156-register-dex-file-multi-loader/expected.txt create mode 100644 test/156-register-dex-file-multi-loader/info.txt create mode 100644 test/156-register-dex-file-multi-loader/src/Main.java create mode 100644 test/157-void-class/expected.txt create mode 100644 test/157-void-class/info.txt create mode 100755 test/157-void-class/run create mode 100644 test/157-void-class/src-art/Main.java create mode 100644 test/158-app-image-class-table/expected.txt create mode 100644 test/158-app-image-class-table/info.txt create mode 100644 test/158-app-image-class-table/profile create mode 100644 test/158-app-image-class-table/run create mode 100644 test/158-app-image-class-table/src/Main.java create mode 100644 test/158-app-image-class-table/src/TestImplementation.java create mode 100644 test/159-app-image-fields/expected.txt create mode 100644 test/159-app-image-fields/info.txt create mode 100644 test/159-app-image-fields/profile create mode 100644 test/159-app-image-fields/run create mode 100644 test/159-app-image-fields/src/AAA/Base.java create mode 100644 
test/159-app-image-fields/src/AAA/Derived.java create mode 100644 test/159-app-image-fields/src/Main.java create mode 100644 test/160-read-barrier-stress/expected.txt create mode 100644 test/160-read-barrier-stress/info.txt create mode 100644 test/160-read-barrier-stress/run create mode 100644 test/160-read-barrier-stress/src/Main.java create mode 100644 test/160-read-barrier-stress/src/ManyFieldsBase0.java create mode 100644 test/160-read-barrier-stress/src/ManyFieldsBase1.java create mode 100644 test/160-read-barrier-stress/src/ManyFieldsBase2.java create mode 100644 test/160-read-barrier-stress/src/ManyFieldsBase3.java create mode 100644 test/161-final-abstract-class/expected.txt create mode 100644 test/161-final-abstract-class/info.txt create mode 100644 test/161-final-abstract-class/smali/AbstractFinal.smali create mode 100644 test/161-final-abstract-class/smali/Main.smali create mode 100644 test/161-final-abstract-class/smali/TestClass.smali create mode 100644 test/162-method-resolution/expected.txt create mode 100644 test/162-method-resolution/info.txt create mode 100644 test/162-method-resolution/jasmin-multidex/Test1User.j create mode 100644 test/162-method-resolution/jasmin-multidex/Test3User.j create mode 100644 test/162-method-resolution/jasmin/Test10Base.j create mode 100644 test/162-method-resolution/jasmin/Test10User.j create mode 100644 test/162-method-resolution/jasmin/Test1Derived.j create mode 100644 test/162-method-resolution/jasmin/Test1User2.j create mode 100644 test/162-method-resolution/jasmin/Test2Derived.j create mode 100644 test/162-method-resolution/jasmin/Test2User.j create mode 100644 test/162-method-resolution/jasmin/Test2User2.j create mode 100644 test/162-method-resolution/jasmin/Test3Derived.j create mode 100644 test/162-method-resolution/jasmin/Test4User.j create mode 100644 test/162-method-resolution/jasmin/Test5User.j create mode 100644 test/162-method-resolution/jasmin/Test5User2.j create mode 100644 
test/162-method-resolution/jasmin/Test6User.j create mode 100644 test/162-method-resolution/jasmin/Test6User2.j create mode 100644 test/162-method-resolution/jasmin/Test8Derived.j create mode 100644 test/162-method-resolution/jasmin/Test8User.j create mode 100644 test/162-method-resolution/jasmin/Test8User2.j create mode 100644 test/162-method-resolution/jasmin/Test9Derived.j create mode 100644 test/162-method-resolution/jasmin/Test9User.j create mode 100644 test/162-method-resolution/jasmin/Test9User2.j create mode 100644 test/162-method-resolution/src/Main.java create mode 100644 test/162-method-resolution/src/Test10Interface.java create mode 100644 test/162-method-resolution/src/Test1Base.java create mode 100644 test/162-method-resolution/src/Test2Base.java create mode 100644 test/162-method-resolution/src/Test2Interface.java create mode 100644 test/162-method-resolution/src/Test3Base.java create mode 100644 test/162-method-resolution/src/Test3Interface.java create mode 100644 test/162-method-resolution/src/Test4Derived.java create mode 100644 test/162-method-resolution/src/Test4Interface.java create mode 100644 test/162-method-resolution/src/Test5Base.java create mode 100644 test/162-method-resolution/src/Test5Derived.java create mode 100644 test/162-method-resolution/src/Test5Interface.java create mode 100644 test/162-method-resolution/src/Test6Derived.java create mode 100644 test/162-method-resolution/src/Test6Interface.java create mode 100644 test/162-method-resolution/src/Test7Base.java create mode 100644 test/162-method-resolution/src/Test7Derived.java create mode 100644 test/162-method-resolution/src/Test7Interface.java create mode 100644 test/162-method-resolution/src/Test7User.java create mode 100644 test/162-method-resolution/src/Test7User2.java create mode 100644 test/162-method-resolution/src/Test8Base.java create mode 100644 test/162-method-resolution/src/Test9Base.java create mode 100644 test/163-app-image-methods/expected.txt create mode 100644 
test/163-app-image-methods/info.txt create mode 100644 test/163-app-image-methods/profile create mode 100644 test/163-app-image-methods/run create mode 100644 test/163-app-image-methods/src/AAA/Base.java create mode 100644 test/163-app-image-methods/src/AAA/Derived.java create mode 100644 test/163-app-image-methods/src/Main.java create mode 100644 test/164-resolution-trampoline-dex-cache/expected.txt create mode 100644 test/164-resolution-trampoline-dex-cache/info.txt create mode 100644 test/164-resolution-trampoline-dex-cache/profile create mode 100644 test/164-resolution-trampoline-dex-cache/run create mode 100644 test/164-resolution-trampoline-dex-cache/src-ex/MostDerived.java create mode 100644 test/164-resolution-trampoline-dex-cache/src/Base.java create mode 100644 test/164-resolution-trampoline-dex-cache/src/Derived.java create mode 100644 test/164-resolution-trampoline-dex-cache/src/Main.java create mode 100644 test/165-lock-owner-proxy/expected.txt create mode 100644 test/165-lock-owner-proxy/info.txt create mode 100644 test/165-lock-owner-proxy/run create mode 100644 test/165-lock-owner-proxy/src/Main.java create mode 100644 test/166-bad-interface-super/build create mode 100644 test/166-bad-interface-super/expected.txt create mode 100644 test/166-bad-interface-super/info.txt create mode 100644 test/166-bad-interface-super/jasmin/BadSuper1.j create mode 100644 test/166-bad-interface-super/jasmin/BadSuper2.j create mode 100644 test/166-bad-interface-super/smali/BadSuper1.smali create mode 100644 test/166-bad-interface-super/smali/BadSuper2.smali create mode 100644 test/166-bad-interface-super/src/BaseClass.java create mode 100644 test/166-bad-interface-super/src/BaseInterface.java create mode 100644 test/166-bad-interface-super/src/Main.java create mode 100644 test/167-visit-locks/expected.txt create mode 100644 test/167-visit-locks/info.txt create mode 100644 test/167-visit-locks/run create mode 100644 test/167-visit-locks/smali/TestSync.smali create mode 
100644 test/167-visit-locks/src/Main.java create mode 100644 test/167-visit-locks/visit_locks.cc create mode 100644 test/168-vmstack-annotated/expected.txt create mode 100644 test/168-vmstack-annotated/info.txt create mode 100644 test/168-vmstack-annotated/run create mode 100644 test/168-vmstack-annotated/src/Main.java create mode 100644 test/169-threadgroup-jni/expected.txt create mode 100644 test/169-threadgroup-jni/info.txt create mode 100644 test/169-threadgroup-jni/jni_daemon_thread.cc create mode 100644 test/169-threadgroup-jni/src/Main.java create mode 100644 test/170-interface-init/expected.txt create mode 100644 test/170-interface-init/info.txt create mode 100644 test/170-interface-init/src/Main.java create mode 100644 test/171-init-aste/expected.txt create mode 100644 test/171-init-aste/info.txt create mode 100644 test/171-init-aste/src-art/Main.java create mode 100644 test/171-init-aste/src/Main.java create mode 100755 test/172-app-image-twice/check create mode 100644 test/172-app-image-twice/debug_print_class.cc create mode 100644 test/172-app-image-twice/expected.txt create mode 100644 test/172-app-image-twice/info.txt create mode 100644 test/172-app-image-twice/profile create mode 100644 test/172-app-image-twice/run create mode 100644 test/172-app-image-twice/src/Main.java create mode 100644 test/172-app-image-twice/src/TestClass.java create mode 100644 test/173-missing-field-type/expected.txt create mode 100644 test/173-missing-field-type/info.txt create mode 100644 test/173-missing-field-type/smali/BadField.smali create mode 100644 test/173-missing-field-type/src-art/Main.java create mode 100644 test/173-missing-field-type/src/Main.java create mode 100644 test/174-escaping-instance-of-bad-class/expected.txt create mode 100644 test/174-escaping-instance-of-bad-class/info.txt create mode 100644 test/174-escaping-instance-of-bad-class/src/Main.java create mode 100644 test/175-alloc-big-bignums/expected.txt create mode 100644 
test/175-alloc-big-bignums/info.txt create mode 100644 test/175-alloc-big-bignums/src/Main.java create mode 100644 test/176-app-image-string/expected.txt create mode 100644 test/176-app-image-string/info.txt create mode 100644 test/176-app-image-string/profile create mode 100644 test/176-app-image-string/run create mode 100644 test/176-app-image-string/src/Main.java create mode 100644 test/177-visibly-initialized-deadlock/expected.txt create mode 100644 test/177-visibly-initialized-deadlock/info.txt create mode 100644 test/177-visibly-initialized-deadlock/src/Main.java create mode 100644 test/177-visibly-initialized-deadlock/visibly_initialized.cc create mode 100755 test/178-app-image-native-method/check create mode 100644 test/178-app-image-native-method/expected.txt create mode 100644 test/178-app-image-native-method/info.txt create mode 100644 test/178-app-image-native-method/native_methods.cc create mode 100644 test/178-app-image-native-method/profile create mode 100644 test/178-app-image-native-method/run create mode 100644 test/178-app-image-native-method/src/Main.java create mode 100644 test/180-native-default-method/build create mode 100644 test/180-native-default-method/expected.txt create mode 100644 test/180-native-default-method/info.txt create mode 100644 test/180-native-default-method/jasmin/TestClass.j create mode 100644 test/180-native-default-method/jasmin/TestInterface.j create mode 100644 test/180-native-default-method/src/Main.java create mode 100644 test/1900-track-alloc/alloc.cc create mode 100644 test/1900-track-alloc/expected.txt create mode 100644 test/1900-track-alloc/info.txt create mode 100755 test/1900-track-alloc/run create mode 100644 test/1900-track-alloc/src/Main.java create mode 120000 test/1900-track-alloc/src/art/Main.java create mode 100644 test/1900-track-alloc/src/art/Test1900.java create mode 100644 test/1901-get-bytecodes/bytecodes.cc create mode 100644 test/1901-get-bytecodes/expected.txt create mode 100644 
test/1901-get-bytecodes/info.txt create mode 100755 test/1901-get-bytecodes/run create mode 100644 test/1901-get-bytecodes/src/Main.java create mode 100644 test/1901-get-bytecodes/src/art/Test1901.java create mode 100644 test/1902-suspend/expected.txt create mode 100644 test/1902-suspend/info.txt create mode 100755 test/1902-suspend/run create mode 100644 test/1902-suspend/src/Main.java create mode 120000 test/1902-suspend/src/art/Suspension.java create mode 100644 test/1902-suspend/src/art/Test1902.java create mode 100644 test/1903-suspend-self/expected.txt create mode 100644 test/1903-suspend-self/info.txt create mode 100755 test/1903-suspend-self/run create mode 100644 test/1903-suspend-self/src/Main.java create mode 120000 test/1903-suspend-self/src/art/Suspension.java create mode 100644 test/1903-suspend-self/src/art/Test1903.java create mode 100644 test/1904-double-suspend/expected.txt create mode 100644 test/1904-double-suspend/info.txt create mode 100755 test/1904-double-suspend/run create mode 100644 test/1904-double-suspend/src/Main.java create mode 120000 test/1904-double-suspend/src/art/Suspension.java create mode 100644 test/1904-double-suspend/src/art/Test1904.java create mode 100644 test/1905-suspend-native/expected.txt create mode 100644 test/1905-suspend-native/info.txt create mode 100644 test/1905-suspend-native/native_suspend.cc create mode 100755 test/1905-suspend-native/run create mode 100644 test/1905-suspend-native/src/Main.java create mode 120000 test/1905-suspend-native/src/art/Suspension.java create mode 100644 test/1905-suspend-native/src/art/Test1905.java create mode 100644 test/1906-suspend-list-me-first/expected.txt create mode 100644 test/1906-suspend-list-me-first/info.txt create mode 100755 test/1906-suspend-list-me-first/run create mode 100644 test/1906-suspend-list-me-first/src/Main.java create mode 120000 test/1906-suspend-list-me-first/src/art/Suspension.java create mode 100644 
test/1906-suspend-list-me-first/src/art/Test1906.java create mode 100644 test/1907-suspend-list-self-twice/expected.txt create mode 100644 test/1907-suspend-list-self-twice/info.txt create mode 100755 test/1907-suspend-list-self-twice/run create mode 100644 test/1907-suspend-list-self-twice/src/Main.java create mode 120000 test/1907-suspend-list-self-twice/src/art/Suspension.java create mode 100644 test/1907-suspend-list-self-twice/src/art/Test1907.java create mode 100644 test/1908-suspend-native-resume-self/expected.txt create mode 100644 test/1908-suspend-native-resume-self/info.txt create mode 100644 test/1908-suspend-native-resume-self/native_suspend_resume.cc create mode 100755 test/1908-suspend-native-resume-self/run create mode 100644 test/1908-suspend-native-resume-self/src/Main.java create mode 120000 test/1908-suspend-native-resume-self/src/art/Suspension.java create mode 100644 test/1908-suspend-native-resume-self/src/art/Test1908.java create mode 100644 test/1909-per-agent-tls/agent_tls.cc create mode 100644 test/1909-per-agent-tls/expected.txt create mode 100644 test/1909-per-agent-tls/info.txt create mode 100755 test/1909-per-agent-tls/run create mode 100644 test/1909-per-agent-tls/src/Main.java create mode 120000 test/1909-per-agent-tls/src/art/Main.java create mode 100644 test/1909-per-agent-tls/src/art/Test1909.java create mode 100644 test/1910-transform-with-default/expected.txt create mode 100644 test/1910-transform-with-default/info.txt create mode 100755 test/1910-transform-with-default/run create mode 100644 test/1910-transform-with-default/src/Main.java create mode 120000 test/1910-transform-with-default/src/art/Redefinition.java create mode 100644 test/1910-transform-with-default/src/art/Test1910.java create mode 100644 test/1911-get-local-var-table/expected.txt create mode 100644 test/1911-get-local-var-table/info.txt create mode 100755 test/1911-get-local-var-table/run create mode 100644 test/1911-get-local-var-table/src/Main.java create 
mode 120000 test/1911-get-local-var-table/src/art/Breakpoint.java create mode 120000 test/1911-get-local-var-table/src/art/Locals.java create mode 120000 test/1911-get-local-var-table/src/art/Suspension.java create mode 100644 test/1911-get-local-var-table/src/art/Test1911.java create mode 100644 test/1912-get-set-local-primitive/expected.txt create mode 100644 test/1912-get-set-local-primitive/info.txt create mode 100755 test/1912-get-set-local-primitive/run create mode 100644 test/1912-get-set-local-primitive/src/Main.java create mode 120000 test/1912-get-set-local-primitive/src/art/Breakpoint.java create mode 120000 test/1912-get-set-local-primitive/src/art/Locals.java create mode 120000 test/1912-get-set-local-primitive/src/art/StackTrace.java create mode 120000 test/1912-get-set-local-primitive/src/art/Suspension.java create mode 100644 test/1912-get-set-local-primitive/src/art/Test1912.java create mode 100644 test/1913-get-set-local-objects/expected.txt create mode 100644 test/1913-get-set-local-objects/info.txt create mode 100755 test/1913-get-set-local-objects/run create mode 100644 test/1913-get-set-local-objects/src/Main.java create mode 120000 test/1913-get-set-local-objects/src/art/Breakpoint.java create mode 120000 test/1913-get-set-local-objects/src/art/Locals.java create mode 120000 test/1913-get-set-local-objects/src/art/StackTrace.java create mode 120000 test/1913-get-set-local-objects/src/art/Suspension.java create mode 100644 test/1913-get-set-local-objects/src/art/Test1913.java create mode 100644 test/1914-get-local-instance/expected.txt create mode 100644 test/1914-get-local-instance/info.txt create mode 100644 test/1914-get-local-instance/local_instance.cc create mode 100755 test/1914-get-local-instance/run create mode 100644 test/1914-get-local-instance/src/Main.java create mode 120000 test/1914-get-local-instance/src/art/Breakpoint.java create mode 120000 test/1914-get-local-instance/src/art/Locals.java create mode 120000 
test/1914-get-local-instance/src/art/StackTrace.java create mode 120000 test/1914-get-local-instance/src/art/Suspension.java create mode 100644 test/1914-get-local-instance/src/art/Test1914.java create mode 100644 test/1915-get-set-local-current-thread/expected.txt create mode 100644 test/1915-get-set-local-current-thread/info.txt create mode 100755 test/1915-get-set-local-current-thread/run create mode 100644 test/1915-get-set-local-current-thread/src/Main.java create mode 120000 test/1915-get-set-local-current-thread/src/art/Breakpoint.java create mode 120000 test/1915-get-set-local-current-thread/src/art/Locals.java create mode 120000 test/1915-get-set-local-current-thread/src/art/StackTrace.java create mode 120000 test/1915-get-set-local-current-thread/src/art/Suspension.java create mode 100644 test/1915-get-set-local-current-thread/src/art/Test1915.java create mode 100644 test/1916-get-set-current-frame/expected.txt create mode 100644 test/1916-get-set-current-frame/info.txt create mode 100755 test/1916-get-set-current-frame/run create mode 100644 test/1916-get-set-current-frame/src/Main.java create mode 120000 test/1916-get-set-current-frame/src/art/Breakpoint.java create mode 120000 test/1916-get-set-current-frame/src/art/Locals.java create mode 120000 test/1916-get-set-current-frame/src/art/StackTrace.java create mode 120000 test/1916-get-set-current-frame/src/art/Suspension.java create mode 100644 test/1916-get-set-current-frame/src/art/Test1916.java create mode 100644 test/1917-get-stack-frame/expected.txt create mode 100644 test/1917-get-stack-frame/info.txt create mode 100755 test/1917-get-stack-frame/run create mode 100644 test/1917-get-stack-frame/src/Main.java create mode 120000 test/1917-get-stack-frame/src/art/Breakpoint.java create mode 120000 test/1917-get-stack-frame/src/art/StackTrace.java create mode 120000 test/1917-get-stack-frame/src/art/Suspension.java create mode 100644 test/1917-get-stack-frame/src/art/Test1917.java create mode 100644 
test/1919-vminit-thread-start-timing/expected.txt create mode 100644 test/1919-vminit-thread-start-timing/info.txt create mode 100755 test/1919-vminit-thread-start-timing/run create mode 100644 test/1919-vminit-thread-start-timing/src/Main.java create mode 120000 test/1919-vminit-thread-start-timing/src/art/Main.java create mode 100644 test/1919-vminit-thread-start-timing/src/art/Test1919.java create mode 100644 test/1919-vminit-thread-start-timing/vminit.cc create mode 100644 test/1919-vminit-thread-start-timing/vminit.h create mode 100644 test/1920-suspend-native-monitor/expected.txt create mode 100644 test/1920-suspend-native-monitor/info.txt create mode 100644 test/1920-suspend-native-monitor/native_suspend_monitor.cc create mode 100755 test/1920-suspend-native-monitor/run create mode 100644 test/1920-suspend-native-monitor/src/Main.java create mode 120000 test/1920-suspend-native-monitor/src/art/Suspension.java create mode 100644 test/1920-suspend-native-monitor/src/art/Test1920.java create mode 100644 test/1921-suspend-native-recursive-monitor/expected.txt create mode 100644 test/1921-suspend-native-recursive-monitor/info.txt create mode 100644 test/1921-suspend-native-recursive-monitor/native_suspend_recursive_monitor.cc create mode 100755 test/1921-suspend-native-recursive-monitor/run create mode 100644 test/1921-suspend-native-recursive-monitor/src/Main.java create mode 120000 test/1921-suspend-native-recursive-monitor/src/art/Suspension.java create mode 100644 test/1921-suspend-native-recursive-monitor/src/art/Test1921.java create mode 100644 test/1922-owned-monitors-info/expected.txt create mode 100644 test/1922-owned-monitors-info/info.txt create mode 100644 test/1922-owned-monitors-info/owned_monitors.cc create mode 100755 test/1922-owned-monitors-info/run create mode 100644 test/1922-owned-monitors-info/src/Main.java create mode 120000 test/1922-owned-monitors-info/src/art/Suspension.java create mode 100644 
test/1922-owned-monitors-info/src/art/Test1922.java create mode 100644 test/1923-frame-pop/expected.txt create mode 100644 test/1923-frame-pop/info.txt create mode 100755 test/1923-frame-pop/run create mode 100644 test/1923-frame-pop/src/Main.java create mode 120000 test/1923-frame-pop/src/art/Breakpoint.java create mode 120000 test/1923-frame-pop/src/art/FramePop.java create mode 120000 test/1923-frame-pop/src/art/Locals.java create mode 120000 test/1923-frame-pop/src/art/StackTrace.java create mode 120000 test/1923-frame-pop/src/art/Suspension.java create mode 100644 test/1923-frame-pop/src/art/Test1923.java create mode 120000 test/1923-frame-pop/src/art/Trace.java create mode 100644 test/1924-frame-pop-toggle/expected.txt create mode 100644 test/1924-frame-pop-toggle/frame_pop_toggle.cc create mode 100644 test/1924-frame-pop-toggle/info.txt create mode 100755 test/1924-frame-pop-toggle/run create mode 100644 test/1924-frame-pop-toggle/src/Main.java create mode 120000 test/1924-frame-pop-toggle/src/art/Breakpoint.java create mode 120000 test/1924-frame-pop-toggle/src/art/FramePop.java create mode 120000 test/1924-frame-pop-toggle/src/art/Locals.java create mode 120000 test/1924-frame-pop-toggle/src/art/StackTrace.java create mode 120000 test/1924-frame-pop-toggle/src/art/Suspension.java create mode 100644 test/1924-frame-pop-toggle/src/art/Test1924.java create mode 120000 test/1924-frame-pop-toggle/src/art/Trace.java create mode 100644 test/1925-self-frame-pop/expected.txt create mode 100644 test/1925-self-frame-pop/info.txt create mode 100755 test/1925-self-frame-pop/run create mode 100644 test/1925-self-frame-pop/src/Main.java create mode 120000 test/1925-self-frame-pop/src/art/Breakpoint.java create mode 120000 test/1925-self-frame-pop/src/art/FramePop.java create mode 120000 test/1925-self-frame-pop/src/art/Locals.java create mode 120000 test/1925-self-frame-pop/src/art/StackTrace.java create mode 120000 test/1925-self-frame-pop/src/art/Suspension.java create 
mode 100644 test/1925-self-frame-pop/src/art/Test1925.java create mode 120000 test/1925-self-frame-pop/src/art/Trace.java create mode 100644 test/1926-missed-frame-pop/expected.txt create mode 100644 test/1926-missed-frame-pop/frame_pop_missed.cc create mode 100644 test/1926-missed-frame-pop/info.txt create mode 100755 test/1926-missed-frame-pop/run create mode 100644 test/1926-missed-frame-pop/src/Main.java create mode 120000 test/1926-missed-frame-pop/src/art/Breakpoint.java create mode 120000 test/1926-missed-frame-pop/src/art/FramePop.java create mode 120000 test/1926-missed-frame-pop/src/art/Locals.java create mode 120000 test/1926-missed-frame-pop/src/art/StackTrace.java create mode 120000 test/1926-missed-frame-pop/src/art/Suspension.java create mode 100644 test/1926-missed-frame-pop/src/art/Test1926.java create mode 120000 test/1926-missed-frame-pop/src/art/Trace.java create mode 100644 test/1927-exception-event/exception_event.cc create mode 100644 test/1927-exception-event/expected.txt create mode 100644 test/1927-exception-event/info.txt create mode 100755 test/1927-exception-event/run create mode 100644 test/1927-exception-event/src/Main.java create mode 120000 test/1927-exception-event/src/art/Breakpoint.java create mode 120000 test/1927-exception-event/src/art/Exceptions.java create mode 120000 test/1927-exception-event/src/art/StackTrace.java create mode 120000 test/1927-exception-event/src/art/Suspension.java create mode 100644 test/1927-exception-event/src/art/Test1927.java create mode 100644 test/1928-exception-event-exception/expected.txt create mode 100644 test/1928-exception-event-exception/info.txt create mode 100755 test/1928-exception-event-exception/run create mode 100644 test/1928-exception-event-exception/src/Main.java create mode 120000 test/1928-exception-event-exception/src/art/Breakpoint.java create mode 120000 test/1928-exception-event-exception/src/art/Exceptions.java create mode 120000 
test/1928-exception-event-exception/src/art/StackTrace.java create mode 120000 test/1928-exception-event-exception/src/art/Suspension.java create mode 100644 test/1928-exception-event-exception/src/art/Test1928.java create mode 100644 test/1929-exception-catch-exception/expected.txt create mode 100644 test/1929-exception-catch-exception/info.txt create mode 100755 test/1929-exception-catch-exception/run create mode 100644 test/1929-exception-catch-exception/smali/art/Test1929$Impl.smali create mode 100644 test/1929-exception-catch-exception/src/Main.java create mode 120000 test/1929-exception-catch-exception/src/art/Breakpoint.java create mode 120000 test/1929-exception-catch-exception/src/art/Exceptions.java create mode 120000 test/1929-exception-catch-exception/src/art/StackTrace.java create mode 120000 test/1929-exception-catch-exception/src/art/Suspension.java create mode 100644 test/1929-exception-catch-exception/src/art/Test1929.java create mode 100644 test/1930-monitor-info/expected.txt create mode 100644 test/1930-monitor-info/info.txt create mode 100644 test/1930-monitor-info/monitor.cc create mode 100755 test/1930-monitor-info/run create mode 100644 test/1930-monitor-info/src/Main.java create mode 120000 test/1930-monitor-info/src/art/Monitors.java create mode 120000 test/1930-monitor-info/src/art/Suspension.java create mode 100644 test/1930-monitor-info/src/art/Test1930.java create mode 100644 test/1931-monitor-events/check create mode 100644 test/1931-monitor-events/expected.txt create mode 100644 test/1931-monitor-events/info.txt create mode 100644 test/1931-monitor-events/jvm-expected.patch create mode 100755 test/1931-monitor-events/run create mode 100644 test/1931-monitor-events/src/Main.java create mode 120000 test/1931-monitor-events/src/art/Monitors.java create mode 120000 test/1931-monitor-events/src/art/Suspension.java create mode 100644 test/1931-monitor-events/src/art/Test1931.java create mode 100644 test/1932-monitor-events-misc/check create 
mode 100644 test/1932-monitor-events-misc/expected.txt create mode 100644 test/1932-monitor-events-misc/info.txt create mode 100644 test/1932-monitor-events-misc/jvm-expected.patch create mode 100644 test/1932-monitor-events-misc/monitor_misc.cc create mode 100755 test/1932-monitor-events-misc/run create mode 100644 test/1932-monitor-events-misc/src/Main.java create mode 120000 test/1932-monitor-events-misc/src/art/Monitors.java create mode 120000 test/1932-monitor-events-misc/src/art/Suspension.java create mode 100644 test/1932-monitor-events-misc/src/art/Test1932.java create mode 100644 test/1933-monitor-current-contended/expected.txt create mode 100644 test/1933-monitor-current-contended/info.txt create mode 100755 test/1933-monitor-current-contended/run create mode 100644 test/1933-monitor-current-contended/src/Main.java create mode 120000 test/1933-monitor-current-contended/src/art/Monitors.java create mode 120000 test/1933-monitor-current-contended/src/art/Suspension.java create mode 100644 test/1933-monitor-current-contended/src/art/Test1933.java create mode 100644 test/1934-jvmti-signal-thread/expected.txt create mode 100644 test/1934-jvmti-signal-thread/info.txt create mode 100755 test/1934-jvmti-signal-thread/run create mode 100644 test/1934-jvmti-signal-thread/signal_threads.cc create mode 100644 test/1934-jvmti-signal-thread/src/Main.java create mode 120000 test/1934-jvmti-signal-thread/src/art/Monitors.java create mode 120000 test/1934-jvmti-signal-thread/src/art/Suspension.java create mode 100644 test/1934-jvmti-signal-thread/src/art/Test1934.java create mode 120000 test/1934-jvmti-signal-thread/src/art/Threads.java create mode 100644 test/1935-get-set-current-frame-jit/expected.txt create mode 100644 test/1935-get-set-current-frame-jit/info.txt create mode 100755 test/1935-get-set-current-frame-jit/run create mode 100644 test/1935-get-set-current-frame-jit/src/Main.java create mode 120000 test/1935-get-set-current-frame-jit/src/art/Breakpoint.java 
create mode 120000 test/1935-get-set-current-frame-jit/src/art/Locals.java create mode 120000 test/1935-get-set-current-frame-jit/src/art/StackTrace.java create mode 120000 test/1935-get-set-current-frame-jit/src/art/Suspension.java create mode 100644 test/1936-thread-end-events/check create mode 100644 test/1936-thread-end-events/expected.txt create mode 100644 test/1936-thread-end-events/info.txt create mode 100644 test/1936-thread-end-events/jvm-expected.patch create mode 100644 test/1936-thread-end-events/method_trace.cc create mode 100755 test/1936-thread-end-events/run create mode 100644 test/1936-thread-end-events/src/Main.java create mode 100644 test/1936-thread-end-events/src/art/Test1936.java create mode 120000 test/1936-thread-end-events/src/art/Trace.java create mode 100644 test/1937-transform-soft-fail/expected.txt create mode 100644 test/1937-transform-soft-fail/info.txt create mode 100755 test/1937-transform-soft-fail/run create mode 100644 test/1937-transform-soft-fail/src/Main.java create mode 120000 test/1937-transform-soft-fail/src/art/Redefinition.java create mode 100644 test/1937-transform-soft-fail/src/art/Test1937.java create mode 100644 test/1938-transform-abstract-single-impl/expected.txt create mode 100644 test/1938-transform-abstract-single-impl/info.txt create mode 100755 test/1938-transform-abstract-single-impl/run create mode 100644 test/1938-transform-abstract-single-impl/src/Main.java create mode 120000 test/1938-transform-abstract-single-impl/src/art/Redefinition.java create mode 100644 test/1939-proxy-frames/expected.txt create mode 100644 test/1939-proxy-frames/info.txt create mode 100644 test/1939-proxy-frames/local_instance.cc create mode 100755 test/1939-proxy-frames/run create mode 100644 test/1939-proxy-frames/src/Main.java create mode 120000 test/1939-proxy-frames/src/art/Breakpoint.java create mode 120000 test/1939-proxy-frames/src/art/Locals.java create mode 120000 test/1939-proxy-frames/src/art/StackTrace.java create mode 
120000 test/1939-proxy-frames/src/art/Suspension.java create mode 100644 test/1939-proxy-frames/src/art/Test1939.java create mode 100644 test/1940-ddms-ext/ddm_ext.cc create mode 100644 test/1940-ddms-ext/expected.txt create mode 100644 test/1940-ddms-ext/info.txt create mode 100755 test/1940-ddms-ext/run create mode 100644 test/1940-ddms-ext/src-art/art/Test1940.java create mode 100644 test/1940-ddms-ext/src/Main.java create mode 100644 test/1940-ddms-ext/src/art/Test1940.java create mode 100644 test/1941-dispose-stress/dispose_stress.cc create mode 100644 test/1941-dispose-stress/expected.txt create mode 100644 test/1941-dispose-stress/info.txt create mode 100755 test/1941-dispose-stress/run create mode 100644 test/1941-dispose-stress/src/Main.java create mode 120000 test/1941-dispose-stress/src/art/Breakpoint.java create mode 100644 test/1941-dispose-stress/src/art/Test1941.java create mode 120000 test/1941-dispose-stress/src/art/Trace.java create mode 100644 test/1942-suspend-raw-monitor-exit/expected.txt create mode 100644 test/1942-suspend-raw-monitor-exit/info.txt create mode 100644 test/1942-suspend-raw-monitor-exit/native_suspend_monitor.cc create mode 100755 test/1942-suspend-raw-monitor-exit/run create mode 100644 test/1942-suspend-raw-monitor-exit/src/Main.java create mode 120000 test/1942-suspend-raw-monitor-exit/src/art/Suspension.java create mode 100644 test/1942-suspend-raw-monitor-exit/src/art/Test1942.java create mode 100644 test/1943-suspend-raw-monitor-wait/expected.txt create mode 100644 test/1943-suspend-raw-monitor-wait/info.txt create mode 100644 test/1943-suspend-raw-monitor-wait/native_suspend_monitor.cc create mode 100755 test/1943-suspend-raw-monitor-wait/run create mode 100644 test/1943-suspend-raw-monitor-wait/src/Main.java create mode 120000 test/1943-suspend-raw-monitor-wait/src/art/Suspension.java create mode 100644 test/1943-suspend-raw-monitor-wait/src/art/Test1943.java create mode 100644 
test/1945-proxy-method-arguments/expected.txt create mode 100644 test/1945-proxy-method-arguments/get_args.cc create mode 100644 test/1945-proxy-method-arguments/info.txt create mode 100644 test/1945-proxy-method-arguments/src/Main.java create mode 100644 test/1946-list-descriptors/descriptors.cc create mode 100644 test/1946-list-descriptors/expected.txt create mode 100644 test/1946-list-descriptors/info.txt create mode 100755 test/1946-list-descriptors/run create mode 100644 test/1946-list-descriptors/src-art/art/Test1946.java create mode 100644 test/1946-list-descriptors/src/Main.java create mode 100644 test/1946-list-descriptors/src/art/Test1946.java create mode 100644 test/1947-breakpoint-redefine-deopt/check_deopt.cc create mode 100644 test/1947-breakpoint-redefine-deopt/expected.txt create mode 100644 test/1947-breakpoint-redefine-deopt/info.txt create mode 100755 test/1947-breakpoint-redefine-deopt/run create mode 100644 test/1947-breakpoint-redefine-deopt/src/Main.java create mode 120000 test/1947-breakpoint-redefine-deopt/src/art/Breakpoint.java create mode 120000 test/1947-breakpoint-redefine-deopt/src/art/Redefinition.java create mode 100644 test/1948-obsolete-const-method-handle/build create mode 100644 test/1948-obsolete-const-method-handle/expected.txt create mode 100644 test/1948-obsolete-const-method-handle/info.txt create mode 100755 test/1948-obsolete-const-method-handle/run create mode 100755 test/1948-obsolete-const-method-handle/util-src/build-classes create mode 100644 test/1948-obsolete-const-method-handle/util-src/info.txt create mode 100644 test/1948-obsolete-const-method-handle/util-src/src/Main.java create mode 100644 test/1948-obsolete-const-method-handle/util-src/src/art/Redefinition.java create mode 100644 test/1948-obsolete-const-method-handle/util-src/src/art/Test1948.java create mode 100644 test/1948-obsolete-const-method-handle/util-src/src/art/constmethodhandle/BaseTestInvoke.java create mode 100644 
test/1948-obsolete-const-method-handle/util-src/src/art/constmethodhandle/Responses.java create mode 100644 test/1948-obsolete-const-method-handle/util-src/src/art/constmethodhandle/TestGenerator.java create mode 100644 test/1948-obsolete-const-method-handle/util-src/src/art/constmethodhandle/TestInvoke.java create mode 100644 test/1949-short-dex-file/expected.txt create mode 100644 test/1949-short-dex-file/info.txt create mode 100755 test/1949-short-dex-file/run create mode 100644 test/1949-short-dex-file/src/Main.java create mode 120000 test/1949-short-dex-file/src/art/Redefinition.java create mode 100644 test/1949-short-dex-file/src/art/Test1949.java create mode 100755 test/1950-unprepared-transform/check create mode 100644 test/1950-unprepared-transform/expected.txt create mode 100644 test/1950-unprepared-transform/info.txt create mode 100644 test/1950-unprepared-transform/jvm-expected.patch create mode 100755 test/1950-unprepared-transform/run create mode 100644 test/1950-unprepared-transform/src-ex/Transform.java create mode 100644 test/1950-unprepared-transform/src/Main.java create mode 120000 test/1950-unprepared-transform/src/art/Redefinition.java create mode 100644 test/1950-unprepared-transform/unprepared_transform.cc create mode 100644 test/1951-monitor-enter-no-suspend/expected.txt create mode 100644 test/1951-monitor-enter-no-suspend/info.txt create mode 100644 test/1951-monitor-enter-no-suspend/raw_monitor.cc create mode 100755 test/1951-monitor-enter-no-suspend/run create mode 100644 test/1951-monitor-enter-no-suspend/src/Main.java create mode 120000 test/1951-monitor-enter-no-suspend/src/art/Main.java create mode 120000 test/1951-monitor-enter-no-suspend/src/art/Suspension.java create mode 100644 test/1951-monitor-enter-no-suspend/src/art/Test1951.java create mode 100755 test/1953-pop-frame/check create mode 100644 test/1953-pop-frame/class-loading-expected.patch create mode 100644 test/1953-pop-frame/expected.txt create mode 100644 
test/1953-pop-frame/info.txt create mode 100644 test/1953-pop-frame/pop_frame.cc create mode 100755 test/1953-pop-frame/run create mode 100644 test/1953-pop-frame/src/Main.java create mode 120000 test/1953-pop-frame/src/art/Breakpoint.java create mode 120000 test/1953-pop-frame/src/art/Redefinition.java create mode 120000 test/1953-pop-frame/src/art/StackTrace.java create mode 120000 test/1953-pop-frame/src/art/SuspendEvents.java create mode 120000 test/1953-pop-frame/src/art/Suspension.java create mode 100644 test/1953-pop-frame/src/art/Test1953.java create mode 100755 test/1954-pop-frame-jit/check create mode 100644 test/1954-pop-frame-jit/expected.txt create mode 100644 test/1954-pop-frame-jit/info.txt create mode 100644 test/1954-pop-frame-jit/jvm-expected.patch create mode 100755 test/1954-pop-frame-jit/run create mode 100644 test/1954-pop-frame-jit/src/Main.java create mode 120000 test/1954-pop-frame-jit/src/art/Breakpoint.java create mode 120000 test/1954-pop-frame-jit/src/art/Redefinition.java create mode 120000 test/1954-pop-frame-jit/src/art/StackTrace.java create mode 120000 test/1954-pop-frame-jit/src/art/SuspendEvents.java create mode 120000 test/1954-pop-frame-jit/src/art/Suspension.java create mode 120000 test/1954-pop-frame-jit/src/art/Test1953.java create mode 100755 test/1955-pop-frame-jit-called/check create mode 100644 test/1955-pop-frame-jit-called/expected.txt create mode 100644 test/1955-pop-frame-jit-called/info.txt create mode 100644 test/1955-pop-frame-jit-called/jvm-expected.patch create mode 100755 test/1955-pop-frame-jit-called/run create mode 100644 test/1955-pop-frame-jit-called/src/Main.java create mode 120000 test/1955-pop-frame-jit-called/src/art/Breakpoint.java create mode 120000 test/1955-pop-frame-jit-called/src/art/Redefinition.java create mode 120000 test/1955-pop-frame-jit-called/src/art/StackTrace.java create mode 120000 test/1955-pop-frame-jit-called/src/art/SuspendEvents.java create mode 120000 
test/1955-pop-frame-jit-called/src/art/Suspension.java create mode 120000 test/1955-pop-frame-jit-called/src/art/Test1953.java create mode 100755 test/1956-pop-frame-jit-calling/check create mode 100644 test/1956-pop-frame-jit-calling/expected.txt create mode 100644 test/1956-pop-frame-jit-calling/info.txt create mode 100644 test/1956-pop-frame-jit-calling/jvm-expected.patch create mode 100755 test/1956-pop-frame-jit-calling/run create mode 100644 test/1956-pop-frame-jit-calling/src/Main.java create mode 120000 test/1956-pop-frame-jit-calling/src/art/Breakpoint.java create mode 120000 test/1956-pop-frame-jit-calling/src/art/Redefinition.java create mode 120000 test/1956-pop-frame-jit-calling/src/art/StackTrace.java create mode 120000 test/1956-pop-frame-jit-calling/src/art/SuspendEvents.java create mode 120000 test/1956-pop-frame-jit-calling/src/art/Suspension.java create mode 120000 test/1956-pop-frame-jit-calling/src/art/Test1953.java create mode 100644 test/1957-error-ext/expected.txt create mode 100644 test/1957-error-ext/info.txt create mode 100644 test/1957-error-ext/lasterror.cc create mode 100755 test/1957-error-ext/run create mode 100644 test/1957-error-ext/src/Main.java create mode 120000 test/1957-error-ext/src/art/Redefinition.java create mode 100644 test/1957-error-ext/src/art/Test1957.java create mode 100644 test/1958-transform-try-jit/expected.txt create mode 100644 test/1958-transform-try-jit/info.txt create mode 100755 test/1958-transform-try-jit/run create mode 100644 test/1958-transform-try-jit/src/Main.java create mode 120000 test/1958-transform-try-jit/src/art/Redefinition.java create mode 100644 test/1958-transform-try-jit/src/art/Test1958.java create mode 100644 test/1959-redefine-object-instrument/expected.txt create mode 100644 test/1959-redefine-object-instrument/fake_redef_object.cc create mode 100644 test/1959-redefine-object-instrument/info.txt create mode 100755 test/1959-redefine-object-instrument/run create mode 100644 
test/1959-redefine-object-instrument/src/Main.java create mode 120000 test/1959-redefine-object-instrument/src/art/Breakpoint.java create mode 100644 test/1960-checker-bounds-codegen/expected.txt create mode 100644 test/1960-checker-bounds-codegen/info.txt create mode 100644 test/1960-checker-bounds-codegen/src/Main.java create mode 100644 test/1960-obsolete-jit-multithread-native/expected.txt create mode 100644 test/1960-obsolete-jit-multithread-native/info.txt create mode 100644 test/1960-obsolete-jit-multithread-native/native_say_hi.cc create mode 100755 test/1960-obsolete-jit-multithread-native/run create mode 100644 test/1960-obsolete-jit-multithread-native/src/Main.java create mode 100644 test/1960-obsolete-jit-multithread-native/src/Transform.java create mode 120000 test/1960-obsolete-jit-multithread-native/src/art/Redefinition.java create mode 100644 test/1961-checker-loop-vectorizer/expected.txt create mode 100644 test/1961-checker-loop-vectorizer/info.txt create mode 100644 test/1961-checker-loop-vectorizer/src/Main.java create mode 100644 test/1961-obsolete-jit-multithread/expected.txt create mode 100644 test/1961-obsolete-jit-multithread/info.txt create mode 100755 test/1961-obsolete-jit-multithread/run create mode 100644 test/1961-obsolete-jit-multithread/src/Main.java create mode 100644 test/1961-obsolete-jit-multithread/src/Transform.java create mode 120000 test/1961-obsolete-jit-multithread/src/art/Redefinition.java create mode 100644 test/1962-multi-thread-events/expected.txt create mode 100644 test/1962-multi-thread-events/info.txt create mode 100644 test/1962-multi-thread-events/multi_thread_events.cc create mode 100755 test/1962-multi-thread-events/run create mode 100644 test/1962-multi-thread-events/src/Main.java create mode 100644 test/1962-multi-thread-events/src/art/Test1962.java create mode 100644 test/1963-add-to-dex-classloader-in-memory/add_to_loader.cc create mode 100755 test/1963-add-to-dex-classloader-in-memory/check create mode 
100644 test/1963-add-to-dex-classloader-in-memory/check_memfd_create.cc create mode 100644 test/1963-add-to-dex-classloader-in-memory/expected.txt create mode 100644 test/1963-add-to-dex-classloader-in-memory/info.txt create mode 100755 test/1963-add-to-dex-classloader-in-memory/run create mode 100644 test/1963-add-to-dex-classloader-in-memory/src/Main.java create mode 120000 test/1963-add-to-dex-classloader-in-memory/src/art/Redefinition.java create mode 100644 test/1963-add-to-dex-classloader-in-memory/src/art/Test1963.java create mode 100644 test/1964-add-to-dex-classloader-file/add_to_loader.cc create mode 100644 test/1964-add-to-dex-classloader-file/expected.txt create mode 100644 test/1964-add-to-dex-classloader-file/info.txt create mode 100755 test/1964-add-to-dex-classloader-file/run create mode 100644 test/1964-add-to-dex-classloader-file/src-ex/foobar/NewClass.java create mode 100644 test/1964-add-to-dex-classloader-file/src/Main.java create mode 120000 test/1964-add-to-dex-classloader-file/src/art/Breakpoint.java create mode 120000 test/1964-add-to-dex-classloader-file/src/art/Redefinition.java create mode 120000 test/1964-add-to-dex-classloader-file/src/art/StackTrace.java create mode 120000 test/1964-add-to-dex-classloader-file/src/art/Suspension.java create mode 100644 test/1965-get-set-local-primitive-no-tables/build create mode 100644 test/1965-get-set-local-primitive-no-tables/expected.txt create mode 100644 test/1965-get-set-local-primitive-no-tables/info.txt create mode 100644 test/1965-get-set-local-primitive-no-tables/jasmin/TestCases1965.j create mode 100755 test/1965-get-set-local-primitive-no-tables/run create mode 100644 test/1965-get-set-local-primitive-no-tables/smali/TestCases1965.smali create mode 100644 test/1965-get-set-local-primitive-no-tables/src/Main.java create mode 120000 test/1965-get-set-local-primitive-no-tables/src/art/Breakpoint.java create mode 120000 test/1965-get-set-local-primitive-no-tables/src/art/Locals.java create 
mode 120000 test/1965-get-set-local-primitive-no-tables/src/art/StackTrace.java create mode 120000 test/1965-get-set-local-primitive-no-tables/src/art/Suspension.java create mode 100644 test/1965-get-set-local-primitive-no-tables/src/art/Test1965.java create mode 100644 test/1966-get-set-local-objects-no-table/build create mode 100644 test/1966-get-set-local-objects-no-table/expected.txt create mode 100644 test/1966-get-set-local-objects-no-table/info.txt create mode 100644 test/1966-get-set-local-objects-no-table/jasmin/TestCases1966.j create mode 100755 test/1966-get-set-local-objects-no-table/run create mode 100644 test/1966-get-set-local-objects-no-table/smali/TestCases1966.smali create mode 100644 test/1966-get-set-local-objects-no-table/src/Main.java create mode 120000 test/1966-get-set-local-objects-no-table/src/art/Breakpoint.java create mode 120000 test/1966-get-set-local-objects-no-table/src/art/Locals.java create mode 120000 test/1966-get-set-local-objects-no-table/src/art/StackTrace.java create mode 120000 test/1966-get-set-local-objects-no-table/src/art/Suspension.java create mode 100644 test/1966-get-set-local-objects-no-table/src/art/Test1966.java create mode 100644 test/1967-get-set-local-bad-slot/expected.txt create mode 100644 test/1967-get-set-local-bad-slot/info.txt create mode 100755 test/1967-get-set-local-bad-slot/run create mode 100644 test/1967-get-set-local-bad-slot/src/Main.java create mode 120000 test/1967-get-set-local-bad-slot/src/art/Breakpoint.java create mode 120000 test/1967-get-set-local-bad-slot/src/art/Locals.java create mode 120000 test/1967-get-set-local-bad-slot/src/art/StackTrace.java create mode 120000 test/1967-get-set-local-bad-slot/src/art/Suspension.java create mode 100644 test/1967-get-set-local-bad-slot/src/art/Test1967.java create mode 100644 test/1968-force-early-return/expected.txt create mode 100644 test/1968-force-early-return/force_early_return.cc create mode 100644 test/1968-force-early-return/info.txt create 
mode 100755 test/1968-force-early-return/run create mode 100644 test/1968-force-early-return/src/Main.java create mode 120000 test/1968-force-early-return/src/art/Breakpoint.java create mode 120000 test/1968-force-early-return/src/art/NonStandardExit.java create mode 120000 test/1968-force-early-return/src/art/StackTrace.java create mode 120000 test/1968-force-early-return/src/art/SuspendEvents.java create mode 120000 test/1968-force-early-return/src/art/Suspension.java create mode 100644 test/1968-force-early-return/src/art/Test1968.java create mode 100755 test/1969-force-early-return-void/check create mode 100644 test/1969-force-early-return-void/class-loading-expected.patch create mode 100644 test/1969-force-early-return-void/expected.txt create mode 100644 test/1969-force-early-return-void/force_early_return_void.cc create mode 100644 test/1969-force-early-return-void/info.txt create mode 100755 test/1969-force-early-return-void/run create mode 100644 test/1969-force-early-return-void/src/Main.java create mode 120000 test/1969-force-early-return-void/src/art/Breakpoint.java create mode 120000 test/1969-force-early-return-void/src/art/NonStandardExit.java create mode 120000 test/1969-force-early-return-void/src/art/StackTrace.java create mode 120000 test/1969-force-early-return-void/src/art/SuspendEvents.java create mode 120000 test/1969-force-early-return-void/src/art/Suspension.java create mode 100644 test/1969-force-early-return-void/src/art/Test1969.java create mode 100644 test/1970-force-early-return-long/expected.txt create mode 100644 test/1970-force-early-return-long/force_early_return_long.cc create mode 100644 test/1970-force-early-return-long/info.txt create mode 100755 test/1970-force-early-return-long/run create mode 100644 test/1970-force-early-return-long/src/Main.java create mode 120000 test/1970-force-early-return-long/src/art/Breakpoint.java create mode 120000 test/1970-force-early-return-long/src/art/NonStandardExit.java create mode 120000 
test/1970-force-early-return-long/src/art/StackTrace.java create mode 120000 test/1970-force-early-return-long/src/art/SuspendEvents.java create mode 120000 test/1970-force-early-return-long/src/art/Suspension.java create mode 100644 test/1970-force-early-return-long/src/art/Test1970.java create mode 100644 test/1971-multi-force-early-return/expected.txt create mode 100644 test/1971-multi-force-early-return/info.txt create mode 100755 test/1971-multi-force-early-return/run create mode 100644 test/1971-multi-force-early-return/src/Main.java create mode 120000 test/1971-multi-force-early-return/src/art/Breakpoint.java create mode 120000 test/1971-multi-force-early-return/src/art/NonStandardExit.java create mode 120000 test/1971-multi-force-early-return/src/art/StackTrace.java create mode 120000 test/1971-multi-force-early-return/src/art/SuspendEvents.java create mode 120000 test/1971-multi-force-early-return/src/art/Suspension.java create mode 100644 test/1971-multi-force-early-return/src/art/Test1971.java create mode 100644 test/1972-jni-id-swap-indices/expected.txt create mode 100644 test/1972-jni-id-swap-indices/info.txt create mode 100644 test/1972-jni-id-swap-indices/jni_id.cc create mode 100755 test/1972-jni-id-swap-indices/run create mode 100644 test/1972-jni-id-swap-indices/src/Main.java create mode 100644 test/1973-jni-id-swap-pointer/expected.txt create mode 100644 test/1973-jni-id-swap-pointer/info.txt create mode 100755 test/1973-jni-id-swap-pointer/run create mode 100644 test/1973-jni-id-swap-pointer/src/Main.java create mode 100644 test/1974-resize-array/expected.txt create mode 100644 test/1974-resize-array/info.txt create mode 100644 test/1974-resize-array/resize_array.cc create mode 100755 test/1974-resize-array/run create mode 100644 test/1974-resize-array/src/Main.java create mode 120000 test/1974-resize-array/src/art/Main.java create mode 100644 test/1974-resize-array/src/art/Test1974.java create mode 100644 
test/1975-hello-structural-transformation/expected.txt create mode 100644 test/1975-hello-structural-transformation/info.txt create mode 100755 test/1975-hello-structural-transformation/run create mode 100644 test/1975-hello-structural-transformation/src/Main.java create mode 120000 test/1975-hello-structural-transformation/src/art/Redefinition.java create mode 100644 test/1975-hello-structural-transformation/src/art/Test1975.java create mode 100644 test/1975-hello-structural-transformation/src/art/Transform1975.java create mode 100644 test/1975-hello-structural-transformation/structural_transform.cc create mode 100644 test/1976-hello-structural-static-methods/expected.txt create mode 100644 test/1976-hello-structural-static-methods/info.txt create mode 100755 test/1976-hello-structural-static-methods/run create mode 100644 test/1976-hello-structural-static-methods/src/Main.java create mode 120000 test/1976-hello-structural-static-methods/src/art/Redefinition.java create mode 100644 test/1976-hello-structural-static-methods/src/art/Test1976.java create mode 100644 test/1976-hello-structural-static-methods/src/art/Transform1976.java create mode 100644 test/1976-hello-structural-static-methods/structural_transform_methods.cc create mode 100644 test/1977-hello-structural-obsolescence/expected.txt create mode 100644 test/1977-hello-structural-obsolescence/info.txt create mode 100755 test/1977-hello-structural-obsolescence/run create mode 100644 test/1977-hello-structural-obsolescence/src/Main.java create mode 120000 test/1977-hello-structural-obsolescence/src/art/Redefinition.java create mode 100644 test/1977-hello-structural-obsolescence/src/art/Test1977.java create mode 100644 test/1978-regular-obsolete-then-structural-obsolescence/expected.txt create mode 100644 test/1978-regular-obsolete-then-structural-obsolescence/info.txt create mode 100755 test/1978-regular-obsolete-then-structural-obsolescence/run create mode 100644 
test/1978-regular-obsolete-then-structural-obsolescence/src/Main.java create mode 120000 test/1978-regular-obsolete-then-structural-obsolescence/src/art/Redefinition.java create mode 100644 test/1978-regular-obsolete-then-structural-obsolescence/src/art/Test1978.java create mode 100644 test/1979-threaded-structural-transformation/expected.txt create mode 100644 test/1979-threaded-structural-transformation/info.txt create mode 100755 test/1979-threaded-structural-transformation/run create mode 100644 test/1979-threaded-structural-transformation/src/Main.java create mode 120000 test/1979-threaded-structural-transformation/src/art/Redefinition.java create mode 100644 test/1979-threaded-structural-transformation/src/art/Test1979.java create mode 100644 test/1980-obsolete-object-cleared/expected.txt create mode 100644 test/1980-obsolete-object-cleared/info.txt create mode 100755 test/1980-obsolete-object-cleared/run create mode 100644 test/1980-obsolete-object-cleared/src/Main.java create mode 120000 test/1980-obsolete-object-cleared/src/art/Redefinition.java create mode 100755 test/1981-structural-redef-private-method-handles/build create mode 100644 test/1981-structural-redef-private-method-handles/expected.txt create mode 100644 test/1981-structural-redef-private-method-handles/expected_no_mh.txt create mode 100644 test/1981-structural-redef-private-method-handles/info.txt create mode 100755 test/1981-structural-redef-private-method-handles/run create mode 100644 test/1981-structural-redef-private-method-handles/src/Main.java create mode 120000 test/1981-structural-redef-private-method-handles/src/art/Redefinition.java create mode 100644 test/1981-structural-redef-private-method-handles/src/art/Test1981.java create mode 100644 test/1981-structural-redef-private-method-handles/src/art/Test1981_Varhandles.java create mode 100644 test/1982-no-virtuals-structural-redefinition/expected.txt create mode 100644 test/1982-no-virtuals-structural-redefinition/info.txt create 
mode 100755 test/1982-no-virtuals-structural-redefinition/run create mode 100644 test/1982-no-virtuals-structural-redefinition/src/Main.java create mode 120000 test/1982-no-virtuals-structural-redefinition/src/art/Redefinition.java create mode 100644 test/1982-no-virtuals-structural-redefinition/src/art/Test1982.java create mode 100755 test/1983-structural-redefinition-failures/build create mode 100644 test/1983-structural-redefinition-failures/expected-cts.txt create mode 100644 test/1983-structural-redefinition-failures/expected.txt create mode 100644 test/1983-structural-redefinition-failures/info.txt create mode 100755 test/1983-structural-redefinition-failures/run create mode 100644 test/1983-structural-redefinition-failures/src/Main.java create mode 120000 test/1983-structural-redefinition-failures/src/art/Redefinition.java create mode 100644 test/1983-structural-redefinition-failures/src/art/Test1983.java create mode 100644 test/1984-structural-redefine-field-trace/expected.txt create mode 100644 test/1984-structural-redefine-field-trace/info.txt create mode 100755 test/1984-structural-redefine-field-trace/run create mode 100644 test/1984-structural-redefine-field-trace/src/Main.java create mode 120000 test/1984-structural-redefine-field-trace/src/art/Redefinition.java create mode 100644 test/1984-structural-redefine-field-trace/src/art/Test1984.java create mode 120000 test/1984-structural-redefine-field-trace/src/art/Trace.java create mode 100644 test/1985-structural-redefine-stack-scope/expected.txt create mode 100644 test/1985-structural-redefine-stack-scope/info.txt create mode 100755 test/1985-structural-redefine-stack-scope/run create mode 100644 test/1985-structural-redefine-stack-scope/src/Main.java create mode 120000 test/1985-structural-redefine-stack-scope/src/art/Redefinition.java create mode 100644 test/1985-structural-redefine-stack-scope/stack_scope.cc create mode 100644 test/1986-structural-redefine-multi-thread-stack-scope/expected.txt 
create mode 100644 test/1986-structural-redefine-multi-thread-stack-scope/info.txt create mode 100755 test/1986-structural-redefine-multi-thread-stack-scope/run create mode 100644 test/1986-structural-redefine-multi-thread-stack-scope/src/Main.java create mode 120000 test/1986-structural-redefine-multi-thread-stack-scope/src/art/Redefinition.java create mode 100644 test/1987-structural-redefine-recursive-stack-scope/expected.txt create mode 100644 test/1987-structural-redefine-recursive-stack-scope/info.txt create mode 100755 test/1987-structural-redefine-recursive-stack-scope/run create mode 100644 test/1987-structural-redefine-recursive-stack-scope/src/Main.java create mode 120000 test/1987-structural-redefine-recursive-stack-scope/src/art/Redefinition.java create mode 100644 test/1988-multi-structural-redefine/expected.txt create mode 100644 test/1988-multi-structural-redefine/info.txt create mode 100755 test/1988-multi-structural-redefine/run create mode 100644 test/1988-multi-structural-redefine/src/Main.java create mode 120000 test/1988-multi-structural-redefine/src/art/Redefinition.java create mode 100644 test/1988-multi-structural-redefine/src/art/Test1988.java create mode 100644 test/1989-transform-bad-monitor/expected.txt create mode 100644 test/1989-transform-bad-monitor/info.txt create mode 100755 test/1989-transform-bad-monitor/run create mode 100644 test/1989-transform-bad-monitor/src/Main.java create mode 120000 test/1989-transform-bad-monitor/src/art/Redefinition.java create mode 100644 test/1989-transform-bad-monitor/src/art/Test1989.java create mode 100644 test/1990-structural-bad-verify/expected.txt create mode 100644 test/1990-structural-bad-verify/info.txt create mode 100755 test/1990-structural-bad-verify/run create mode 100644 test/1990-structural-bad-verify/src/Main.java create mode 120000 test/1990-structural-bad-verify/src/art/Redefinition.java create mode 100644 test/1990-structural-bad-verify/src/art/Test1990.java create mode 100644 
test/1991-hello-structural-retransform/expected.txt create mode 100644 test/1991-hello-structural-retransform/info.txt create mode 100755 test/1991-hello-structural-retransform/run create mode 100644 test/1991-hello-structural-retransform/src/Main.java create mode 120000 test/1991-hello-structural-retransform/src/art/Redefinition.java create mode 100644 test/1991-hello-structural-retransform/src/art/Test1991.java create mode 100644 test/1992-retransform-no-such-field/expected.txt create mode 100644 test/1992-retransform-no-such-field/info.txt create mode 100755 test/1992-retransform-no-such-field/run create mode 100644 test/1992-retransform-no-such-field/src/Main.java create mode 120000 test/1992-retransform-no-such-field/src/art/Redefinition.java create mode 100644 test/1992-retransform-no-such-field/src/art/Test1992.java create mode 100644 test/1993-fallback-non-structural/expected.txt create mode 100644 test/1993-fallback-non-structural/info.txt create mode 100755 test/1993-fallback-non-structural/run create mode 100644 test/1993-fallback-non-structural/src/Main.java create mode 120000 test/1993-fallback-non-structural/src/art/Redefinition.java create mode 100644 test/1993-fallback-non-structural/src/art/Test1993.java create mode 100644 test/1994-final-virtual-structural/expected.txt create mode 100644 test/1994-final-virtual-structural/info.txt create mode 100755 test/1994-final-virtual-structural/run create mode 100644 test/1994-final-virtual-structural/src/Main.java create mode 120000 test/1994-final-virtual-structural/src/art/Redefinition.java create mode 100644 test/1994-final-virtual-structural/src/art/Test1994.java create mode 100644 test/1995-final-virtual-structural-multithread/expected.txt create mode 100644 test/1995-final-virtual-structural-multithread/info.txt create mode 100755 test/1995-final-virtual-structural-multithread/run create mode 100644 test/1995-final-virtual-structural-multithread/src/Main.java create mode 120000 
test/1995-final-virtual-structural-multithread/src/art/Redefinition.java create mode 100644 test/1995-final-virtual-structural-multithread/src/art/Test1995.java create mode 100644 test/1996-final-override-virtual-structural/expected.txt create mode 100644 test/1996-final-override-virtual-structural/info.txt create mode 100755 test/1996-final-override-virtual-structural/run create mode 100644 test/1996-final-override-virtual-structural/src/Main.java create mode 120000 test/1996-final-override-virtual-structural/src/art/Redefinition.java create mode 100644 test/1996-final-override-virtual-structural/src/art/Test1996.java create mode 100644 test/1997-structural-shadow-method/expected.txt create mode 100644 test/1997-structural-shadow-method/info.txt create mode 100755 test/1997-structural-shadow-method/run create mode 100644 test/1997-structural-shadow-method/src/Main.java create mode 120000 test/1997-structural-shadow-method/src/art/Redefinition.java create mode 100644 test/1997-structural-shadow-method/src/art/Test1997.java create mode 100644 test/1998-structural-shadow-field/expected.txt create mode 100644 test/1998-structural-shadow-field/info.txt create mode 100755 test/1998-structural-shadow-field/run create mode 100644 test/1998-structural-shadow-field/src/Main.java create mode 120000 test/1998-structural-shadow-field/src/art/Redefinition.java create mode 100644 test/1998-structural-shadow-field/src/art/Test1998.java create mode 100644 test/1999-virtual-structural/expected.txt create mode 100644 test/1999-virtual-structural/info.txt create mode 100755 test/1999-virtual-structural/run create mode 100644 test/1999-virtual-structural/src/Main.java create mode 120000 test/1999-virtual-structural/src/art/Redefinition.java create mode 100644 test/1999-virtual-structural/src/art/Test1999.java create mode 100644 test/2000-virtual-list-structural/AbstractCollection.patch create mode 100755 test/2000-virtual-list-structural/build create mode 100644 
test/2000-virtual-list-structural/expected.txt create mode 100644 test/2000-virtual-list-structural/info.txt create mode 100755 test/2000-virtual-list-structural/run create mode 120000 test/2000-virtual-list-structural/src-ex/java/util/AbstractCollection.java create mode 100644 test/2000-virtual-list-structural/src/Main.java create mode 120000 test/2000-virtual-list-structural/src/art/Redefinition.java create mode 100644 test/2001-virtual-structural-multithread/expected.txt create mode 100644 test/2001-virtual-structural-multithread/info.txt create mode 100755 test/2001-virtual-structural-multithread/run create mode 100644 test/2001-virtual-structural-multithread/src-art/Main.java create mode 120000 test/2001-virtual-structural-multithread/src-art/art/Redefinition.java create mode 100644 test/2001-virtual-structural-multithread/src-art/art/Test2001.java create mode 100644 test/2001-virtual-structural-multithread/src/Main.java create mode 100644 test/2002-virtual-structural-initializing/expected.txt create mode 100644 test/2002-virtual-structural-initializing/info.txt create mode 100755 test/2002-virtual-structural-initializing/run create mode 100644 test/2002-virtual-structural-initializing/src-art/Main.java create mode 120000 test/2002-virtual-structural-initializing/src-art/art/Redefinition.java create mode 100644 test/2002-virtual-structural-initializing/src-art/art/Test2002.java create mode 100644 test/2002-virtual-structural-initializing/src/Main.java create mode 100644 test/2003-double-virtual-structural/expected.txt create mode 100644 test/2003-double-virtual-structural/info.txt create mode 100755 test/2003-double-virtual-structural/run create mode 100644 test/2003-double-virtual-structural/src/Main.java create mode 120000 test/2003-double-virtual-structural/src/art/Redefinition.java create mode 100644 test/2003-double-virtual-structural/src/art/Test2003.java create mode 100644 test/2004-double-virtual-structural-abstract/expected.txt create mode 100644 
test/2004-double-virtual-structural-abstract/info.txt create mode 100755 test/2004-double-virtual-structural-abstract/run create mode 100644 test/2004-double-virtual-structural-abstract/src/Main.java create mode 120000 test/2004-double-virtual-structural-abstract/src/art/Redefinition.java create mode 100644 test/2004-double-virtual-structural-abstract/src/art/Test2004.java create mode 100644 test/2005-pause-all-redefine-multithreaded/expected.txt create mode 100644 test/2005-pause-all-redefine-multithreaded/info.txt create mode 100644 test/2005-pause-all-redefine-multithreaded/pause-all.cc create mode 100755 test/2005-pause-all-redefine-multithreaded/run create mode 100644 test/2005-pause-all-redefine-multithreaded/src/Main.java create mode 120000 test/2005-pause-all-redefine-multithreaded/src/art/Redefinition.java create mode 120000 test/2005-pause-all-redefine-multithreaded/src/art/Suspension.java create mode 100644 test/2005-pause-all-redefine-multithreaded/src/art/Test2005.java create mode 100644 test/2006-virtual-structural-finalizing/expected.txt create mode 100644 test/2006-virtual-structural-finalizing/info.txt create mode 100755 test/2006-virtual-structural-finalizing/run create mode 100644 test/2006-virtual-structural-finalizing/src-art/Main.java create mode 120000 test/2006-virtual-structural-finalizing/src-art/art/Redefinition.java create mode 100644 test/2006-virtual-structural-finalizing/src-art/art/Test2006.java create mode 100644 test/2006-virtual-structural-finalizing/src/Main.java create mode 100644 test/2007-virtual-structural-finalizable/expected.txt create mode 100644 test/2007-virtual-structural-finalizable/info.txt create mode 100755 test/2007-virtual-structural-finalizable/run create mode 100644 test/2007-virtual-structural-finalizable/src-art/Main.java create mode 120000 test/2007-virtual-structural-finalizable/src-art/art/Redefinition.java create mode 100644 test/2007-virtual-structural-finalizable/src-art/art/Test2007.java create mode 
100644 test/2007-virtual-structural-finalizable/src/Main.java create mode 100644 test/2008-redefine-then-old-reflect-field/expected.txt create mode 100644 test/2008-redefine-then-old-reflect-field/info.txt create mode 100755 test/2008-redefine-then-old-reflect-field/run create mode 100644 test/2008-redefine-then-old-reflect-field/src/Main.java create mode 120000 test/2008-redefine-then-old-reflect-field/src/art/Redefinition.java create mode 100644 test/2008-redefine-then-old-reflect-field/src/art/Test2008.java create mode 100644 test/2009-structural-local-ref/expected.txt create mode 100644 test/2009-structural-local-ref/info.txt create mode 100644 test/2009-structural-local-ref/local-ref.cc create mode 100755 test/2009-structural-local-ref/run create mode 100644 test/2009-structural-local-ref/src-art/Main.java create mode 120000 test/2009-structural-local-ref/src-art/art/Redefinition.java create mode 100644 test/2009-structural-local-ref/src-art/art/Test2009.java create mode 100644 test/2009-structural-local-ref/src/Main.java create mode 100644 test/201-built-in-except-detail-messages/expected.txt create mode 100644 test/201-built-in-except-detail-messages/info.txt create mode 100644 test/201-built-in-except-detail-messages/src/Main.java create mode 100644 test/2011-stack-walk-concurrent-instrument/expected.txt create mode 100644 test/2011-stack-walk-concurrent-instrument/info.txt create mode 100644 test/2011-stack-walk-concurrent-instrument/src/Main.java create mode 100644 test/2011-stack-walk-concurrent-instrument/stack_walk_concurrent.cc create mode 100644 test/2012-structural-redefinition-failures-jni-id/expected.txt create mode 100644 test/2012-structural-redefinition-failures-jni-id/info.txt create mode 100755 test/2012-structural-redefinition-failures-jni-id/run create mode 100644 test/2012-structural-redefinition-failures-jni-id/set-jni-id-used.cc create mode 100644 test/2012-structural-redefinition-failures-jni-id/src-art/Main.java create mode 120000 
test/2012-structural-redefinition-failures-jni-id/src-art/art/Redefinition.java create mode 100644 test/2012-structural-redefinition-failures-jni-id/src-art/art/Test1983.java create mode 100644 test/2019-constantcalculationsinking/expected.txt create mode 100644 test/2019-constantcalculationsinking/info.txt create mode 100644 test/2019-constantcalculationsinking/src/Main.java create mode 100644 test/202-thread-oome/expected.txt create mode 100644 test/202-thread-oome/info.txt create mode 100644 test/202-thread-oome/src/Main.java create mode 100644 test/2020-InvokeVirtual-Inlining/expected.txt create mode 100644 test/2020-InvokeVirtual-Inlining/info.txt create mode 100644 test/2020-InvokeVirtual-Inlining/src/Main.java create mode 100644 test/2020-InvokeVirtual-Inlining/src/Test.java create mode 100644 test/2021-InvokeStatic-Inlining/expected.txt create mode 100644 test/2021-InvokeStatic-Inlining/info.txt create mode 100644 test/2021-InvokeStatic-Inlining/src/Main.java create mode 100644 test/2022-Invariantloops/expected.txt create mode 100644 test/2022-Invariantloops/info.txt create mode 100644 test/2022-Invariantloops/src/Main.java create mode 100644 test/2023-InvariantLoops_typecast/expected.txt create mode 100644 test/2023-InvariantLoops_typecast/info.txt create mode 100644 test/2023-InvariantLoops_typecast/src/Main.java create mode 100644 test/2024-InvariantNegativeLoop/expected.txt create mode 100644 test/2024-InvariantNegativeLoop/info.txt create mode 100644 test/2024-InvariantNegativeLoop/src/Main.java create mode 100644 test/2025-ChangedArrayValue/expected.txt create mode 100644 test/2025-ChangedArrayValue/info.txt create mode 100644 test/2025-ChangedArrayValue/src/Main.java create mode 100644 test/2026-DifferentMemoryLSCouples/expected.txt create mode 100644 test/2026-DifferentMemoryLSCouples/info.txt create mode 100644 test/2026-DifferentMemoryLSCouples/src/Main.java create mode 100644 test/2027-TwiceTheSameMemoryCouple/expected.txt create mode 100644 
test/2027-TwiceTheSameMemoryCouple/info.txt create mode 100644 test/2027-TwiceTheSameMemoryCouple/src/Main.java create mode 100644 test/2028-MultiBackward/expected.txt create mode 100644 test/2028-MultiBackward/info.txt create mode 100644 test/2028-MultiBackward/src/Main.java create mode 100644 test/2029-contended-monitors/expected.txt create mode 100644 test/2029-contended-monitors/info.txt create mode 100644 test/2029-contended-monitors/src/Main.java create mode 100755 test/2029-spaces-in-SimpleName/build create mode 100644 test/2029-spaces-in-SimpleName/classes.dex create mode 100644 test/2029-spaces-in-SimpleName/expected.txt create mode 100644 test/2029-spaces-in-SimpleName/info.txt create mode 100644 test/2029-spaces-in-SimpleName/src/SpacesInSimpleName.java create mode 100644 test/203-multi-checkpoint/expected.txt create mode 100644 test/203-multi-checkpoint/info.txt create mode 100644 test/203-multi-checkpoint/multi_checkpoint.cc create mode 100644 test/203-multi-checkpoint/src/Main.java create mode 100644 test/2030-long-running-child/expected.txt create mode 100644 test/2030-long-running-child/info.txt create mode 100644 test/2030-long-running-child/src/Main.java create mode 100644 test/2031-zygote-compiled-frame-deopt/expected.txt create mode 100644 test/2031-zygote-compiled-frame-deopt/info.txt create mode 100644 test/2031-zygote-compiled-frame-deopt/native-wait.cc create mode 100755 test/2031-zygote-compiled-frame-deopt/run create mode 100644 test/2031-zygote-compiled-frame-deopt/src/Main.java create mode 120000 test/2031-zygote-compiled-frame-deopt/src/art/Redefinition.java create mode 100644 test/2031-zygote-compiled-frame-deopt/src/art/Test2031.java create mode 100644 test/2032-default-method-private-override/expected.txt create mode 100644 test/2032-default-method-private-override/info.txt create mode 100644 test/2032-default-method-private-override/jasmin/Concrete1.j create mode 100644 test/2032-default-method-private-override/jasmin/Concrete2.j 
create mode 100644 test/2032-default-method-private-override/jasmin/Concrete3.j create mode 100644 test/2032-default-method-private-override/src/Concrete2Base.java create mode 100644 test/2032-default-method-private-override/src/IFace.java create mode 100644 test/2032-default-method-private-override/src/Main.java create mode 100644 test/2035-structural-native-method/expected.txt create mode 100644 test/2035-structural-native-method/info.txt create mode 100755 test/2035-structural-native-method/run create mode 100644 test/2035-structural-native-method/src-art/Main.java create mode 120000 test/2035-structural-native-method/src-art/art/Redefinition.java create mode 100644 test/2035-structural-native-method/src-art/art/Test2035.java create mode 100644 test/2035-structural-native-method/src/Main.java create mode 100644 test/2035-structural-native-method/structural-native.cc create mode 100644 test/2036-structural-subclass-shadow/expected.txt create mode 100644 test/2036-structural-subclass-shadow/info.txt create mode 100755 test/2036-structural-subclass-shadow/run create mode 100644 test/2036-structural-subclass-shadow/src-art/Main.java create mode 120000 test/2036-structural-subclass-shadow/src-art/art/Redefinition.java create mode 100644 test/2036-structural-subclass-shadow/src-art/art/Test2036.java create mode 100644 test/2036-structural-subclass-shadow/src/Main.java create mode 100644 test/2230-profile-save-hotness/expected.txt create mode 100644 test/2230-profile-save-hotness/info.txt create mode 100644 test/2230-profile-save-hotness/run create mode 100644 test/2230-profile-save-hotness/src-art/Main.java create mode 100644 test/300-package-override/expected.txt create mode 100644 test/300-package-override/info.txt create mode 100644 test/300-package-override/src/Main.java create mode 100644 test/300-package-override/src/p1/BaseClass.java create mode 100644 test/300-package-override/src/p2/DerivedClass.java create mode 100644 
test/300-package-override/src/p2/DerivedClass2.java create mode 100644 test/301-abstract-protected/expected.txt create mode 100644 test/301-abstract-protected/info.txt create mode 100644 test/301-abstract-protected/src/Main.java create mode 100644 test/302-float-conversion/expected.txt create mode 100644 test/302-float-conversion/info.txt create mode 100644 test/302-float-conversion/src/Main.java create mode 100644 test/303-verification-stress/build create mode 100644 test/303-verification-stress/classes-gen.c create mode 100644 test/303-verification-stress/expected.txt create mode 100644 test/303-verification-stress/info.txt create mode 100644 test/303-verification-stress/src/Main.java create mode 100644 test/304-method-tracing/expected.txt create mode 100644 test/304-method-tracing/info.txt create mode 100755 test/304-method-tracing/run create mode 100644 test/304-method-tracing/src/Main.java create mode 100644 test/305-other-fault-handler/expected.txt create mode 100644 test/305-other-fault-handler/fault_handler.cc create mode 100644 test/305-other-fault-handler/info.txt create mode 100644 test/305-other-fault-handler/src/Main.java create mode 100755 test/370-dex-v37/build create mode 100644 test/370-dex-v37/expected.txt create mode 100644 test/370-dex-v37/info.txt create mode 100644 test/370-dex-v37/src/Main.java create mode 100644 test/401-optimizing-compiler/expected.txt create mode 100644 test/401-optimizing-compiler/info.txt create mode 100644 test/401-optimizing-compiler/src/Main.java create mode 100644 test/402-optimizing-control-flow/expected.txt create mode 100644 test/402-optimizing-control-flow/info.txt create mode 100644 test/402-optimizing-control-flow/src/Main.java create mode 100644 test/403-optimizing-long/expected.txt create mode 100644 test/403-optimizing-long/info.txt create mode 100644 test/403-optimizing-long/src/Main.java create mode 100644 test/404-optimizing-allocator/expected.txt create mode 100644 test/404-optimizing-allocator/info.txt 
create mode 100644 test/404-optimizing-allocator/src/Main.java create mode 100644 test/405-optimizing-long-allocator/expected.txt create mode 100644 test/405-optimizing-long-allocator/info.txt create mode 100644 test/405-optimizing-long-allocator/src/Main.java create mode 100644 test/406-fields/expected.txt create mode 100644 test/406-fields/info.txt create mode 100644 test/406-fields/src/Main.java create mode 100644 test/406-fields/src/TestCase.java create mode 100644 test/407-arrays/expected.txt create mode 100644 test/407-arrays/info.txt create mode 100644 test/407-arrays/src/Main.java create mode 100644 test/407-arrays/src/TestCase.java create mode 100644 test/408-move-bug/expected.txt create mode 100644 test/408-move-bug/info.txt create mode 100644 test/408-move-bug/src/Main.java create mode 100644 test/409-materialized-condition/expected.txt create mode 100644 test/409-materialized-condition/info.txt create mode 100644 test/409-materialized-condition/src/Main.java create mode 100644 test/410-floats/expected.txt create mode 100644 test/410-floats/info.txt create mode 100644 test/410-floats/src/Main.java create mode 100644 test/411-checker-hdiv-hrem-pow2/expected.txt create mode 100644 test/411-checker-hdiv-hrem-pow2/info.txt create mode 100644 test/411-checker-hdiv-hrem-pow2/src/DivTest.java create mode 100644 test/411-checker-hdiv-hrem-pow2/src/Main.java create mode 100644 test/411-checker-hdiv-hrem-pow2/src/RemTest.java create mode 100644 test/411-optimizing-arith/expected.txt create mode 100644 test/411-optimizing-arith/info.txt create mode 100644 test/411-optimizing-arith/src/DivTest.java create mode 100644 test/411-optimizing-arith/src/Main.java create mode 100644 test/411-optimizing-arith/src/MulTest.java create mode 100644 test/411-optimizing-arith/src/NegTest.java create mode 100644 test/411-optimizing-arith/src/RemTest.java create mode 100644 test/411-optimizing-arith/src/ShiftsTest.java create mode 100644 test/411-optimizing-arith/src/SubTest.java 
create mode 100644 test/412-new-array/expected.txt create mode 100644 test/412-new-array/info.txt create mode 100644 test/412-new-array/smali/fill_array_data.smali create mode 100644 test/412-new-array/smali/filled_new_array.smali create mode 100644 test/412-new-array/smali/filled_new_array_verify_error.smali create mode 100644 test/412-new-array/src/Main.java create mode 100644 test/412-new-array/src/TestCase.java create mode 100644 test/413-regalloc-regression/expected.txt create mode 100644 test/413-regalloc-regression/info.txt create mode 100644 test/413-regalloc-regression/src/Main.java create mode 100644 test/414-static-fields/expected.txt create mode 100644 test/414-static-fields/info.txt create mode 100644 test/414-static-fields/src/Main.java create mode 100644 test/414-static-fields/src/Other.java create mode 100644 test/414-static-fields/src/OtherWithClinit.java create mode 100644 test/414-static-fields/src/TestCase.java create mode 100644 test/416-optimizing-arith-not/expected.txt create mode 100644 test/416-optimizing-arith-not/info.txt create mode 100644 test/416-optimizing-arith-not/smali/not.smali create mode 100644 test/416-optimizing-arith-not/src/Main.java create mode 100644 test/418-const-string/expected.txt create mode 100644 test/418-const-string/info.txt create mode 100644 test/418-const-string/src/Main.java create mode 100644 test/419-long-parameter/expected.txt create mode 100644 test/419-long-parameter/info.txt create mode 100644 test/419-long-parameter/src/Main.java create mode 100644 test/420-const-class/expected.txt create mode 100644 test/420-const-class/info.txt create mode 100644 test/420-const-class/src/Main.java create mode 100644 test/421-exceptions/expected.txt create mode 100644 test/421-exceptions/info.txt create mode 100644 test/421-exceptions/src/Main.java create mode 100644 test/421-large-frame/expected.txt create mode 100644 test/421-large-frame/info.txt create mode 100644 test/421-large-frame/src/Main.java create mode 
100644 test/422-instanceof/expected.txt create mode 100644 test/422-instanceof/info.txt create mode 100644 test/422-instanceof/src/Main.java create mode 100644 test/422-type-conversion/expected.txt create mode 100644 test/422-type-conversion/info.txt create mode 100644 test/422-type-conversion/src/Main.java create mode 100644 test/423-invoke-interface/expected.txt create mode 100644 test/423-invoke-interface/info.txt create mode 100644 test/423-invoke-interface/src/Main.java create mode 100644 test/424-checkcast/expected.txt create mode 100644 test/424-checkcast/info.txt create mode 100644 test/424-checkcast/src/Main.java create mode 100644 test/425-invoke-super/expected.txt create mode 100644 test/425-invoke-super/info.txt create mode 100644 test/425-invoke-super/smali/invokesuper.smali create mode 100644 test/425-invoke-super/smali/subclass.smali create mode 100644 test/425-invoke-super/smali/superclass.smali create mode 100644 test/425-invoke-super/src/Main.java create mode 100644 test/426-monitor/expected.txt create mode 100644 test/426-monitor/info.txt create mode 100644 test/426-monitor/src/Main.java create mode 100644 test/427-bitwise/expected.txt create mode 100644 test/427-bitwise/info.txt create mode 100644 test/427-bitwise/src/Main.java create mode 100644 test/427-bounds/expected.txt create mode 100644 test/427-bounds/info.txt create mode 100644 test/427-bounds/src/Main.java create mode 100644 test/429-ssa-builder/expected.txt create mode 100644 test/429-ssa-builder/info.txt create mode 100644 test/429-ssa-builder/src/Main.java create mode 100644 test/430-live-register-slow-path/expected.txt create mode 100644 test/430-live-register-slow-path/info.txt create mode 100644 test/430-live-register-slow-path/src/Main.java create mode 100644 test/431-type-propagation/expected.txt create mode 100644 test/431-type-propagation/info.txt create mode 100644 test/431-type-propagation/smali/TypePropagation.smali create mode 100644 
test/431-type-propagation/src/Main.java create mode 100644 test/432-optimizing-cmp/expected.txt create mode 100644 test/432-optimizing-cmp/info.txt create mode 100644 test/432-optimizing-cmp/smali/cmp.smali create mode 100644 test/432-optimizing-cmp/src/Main.java create mode 100644 test/433-gvn/expected.txt create mode 100644 test/433-gvn/info.txt create mode 100644 test/433-gvn/src/Main.java create mode 100644 test/434-invoke-direct/expected.txt create mode 100644 test/434-invoke-direct/info.txt create mode 100644 test/434-invoke-direct/smali/invoke.smali create mode 100644 test/434-invoke-direct/src/InvokeDirectSuper.java create mode 100644 test/434-invoke-direct/src/Main.java create mode 100644 test/434-shifter-operand/expected.txt create mode 100644 test/434-shifter-operand/info.txt create mode 100644 test/434-shifter-operand/src/Main.java create mode 100644 test/435-new-instance/expected.txt create mode 100644 test/435-new-instance/info.txt create mode 100644 test/435-new-instance/smali/instance.smali create mode 100644 test/435-new-instance/src/Main.java create mode 100644 test/435-new-instance/src/TestClass.java create mode 100644 test/435-new-instance/src/TestInterface.java create mode 100644 test/435-new-instance/src/pkg/ProtectedClass.java create mode 100644 test/435-try-finally-without-catch/expected.txt create mode 100644 test/435-try-finally-without-catch/info.txt create mode 100644 test/435-try-finally-without-catch/src/Main.java create mode 100644 test/436-rem-float/expected.txt create mode 100644 test/436-rem-float/info.txt create mode 100644 test/436-rem-float/src/Main.java create mode 100644 test/436-shift-constant/expected.txt create mode 100644 test/436-shift-constant/info.txt create mode 100644 test/436-shift-constant/src/Main.java create mode 100644 test/437-inline/expected.txt create mode 100644 test/437-inline/info.txt create mode 100644 test/437-inline/src/Main.java create mode 100644 test/438-volatile/expected.txt create mode 100644 
test/438-volatile/info.txt create mode 100644 test/438-volatile/src/Main.java create mode 100644 test/439-npe/expected.txt create mode 100644 test/439-npe/info.txt create mode 100644 test/439-npe/src/Main.java create mode 100644 test/439-swap-double/expected.txt create mode 100644 test/439-swap-double/info.txt create mode 100644 test/439-swap-double/src/Main.java create mode 100644 test/440-stmp/expected.txt create mode 100644 test/440-stmp/info.txt create mode 100644 test/440-stmp/src/Main.java create mode 100644 test/441-checker-inliner/expected.txt create mode 100644 test/441-checker-inliner/info.txt create mode 100644 test/441-checker-inliner/smali/Smali.smali create mode 100644 test/441-checker-inliner/src/Main.java create mode 100644 test/442-checker-constant-folding/expected.txt create mode 100644 test/442-checker-constant-folding/info.txt create mode 100644 test/442-checker-constant-folding/smali/TestCmp.smali create mode 100644 test/442-checker-constant-folding/src/Main.java create mode 100644 test/443-not-bool-inline/expected.txt create mode 100644 test/443-not-bool-inline/info.txt create mode 100644 test/443-not-bool-inline/src/Main.java create mode 100644 test/444-checker-nce/expected.txt create mode 100644 test/444-checker-nce/info.txt create mode 100644 test/444-checker-nce/src/Main.java create mode 100644 test/445-checker-licm/expected.txt create mode 100644 test/445-checker-licm/info.txt create mode 100644 test/445-checker-licm/src/Main.java create mode 100644 test/446-checker-inliner2/expected.txt create mode 100644 test/446-checker-inliner2/info.txt create mode 100644 test/446-checker-inliner2/src/Main.java create mode 100644 test/447-checker-inliner3/expected.txt create mode 100644 test/447-checker-inliner3/info.txt create mode 100644 test/447-checker-inliner3/src/Main.java create mode 100644 test/448-multiple-returns/expected.txt create mode 100644 test/448-multiple-returns/info.txt create mode 100644 
test/448-multiple-returns/smali/MultipleReturns.smali create mode 100644 test/448-multiple-returns/src/Main.java create mode 100644 test/449-checker-bce/expected.txt create mode 100644 test/449-checker-bce/info.txt create mode 100644 test/449-checker-bce/src/Main.java create mode 100644 test/450-checker-types/expected.txt create mode 100644 test/450-checker-types/info.txt create mode 100644 test/450-checker-types/smali/Main2.smali create mode 100644 test/450-checker-types/src/Main.java create mode 100644 test/451-regression-add-float/expected.txt create mode 100644 test/451-regression-add-float/info.txt create mode 100644 test/451-regression-add-float/src/Main.java create mode 100644 test/451-spill-splot/expected.txt create mode 100644 test/451-spill-splot/info.txt create mode 100644 test/451-spill-splot/src/Main.java create mode 100644 test/452-multiple-returns2/expected.txt create mode 100644 test/452-multiple-returns2/info.txt create mode 100644 test/452-multiple-returns2/smali/MultipleReturns.smali create mode 100644 test/452-multiple-returns2/src/Main.java create mode 100644 test/453-not-byte/expected.txt create mode 100644 test/453-not-byte/info.txt create mode 100644 test/453-not-byte/smali/NotByte.smali create mode 100644 test/453-not-byte/src/Main.java create mode 100644 test/454-get-vreg/expected.txt create mode 100644 test/454-get-vreg/get_vreg_jni.cc create mode 100644 test/454-get-vreg/info.txt create mode 100644 test/454-get-vreg/src/Main.java create mode 100644 test/455-checker-gvn/expected.txt create mode 100644 test/455-checker-gvn/info.txt create mode 100644 test/455-checker-gvn/smali/Smali.smali create mode 100644 test/455-checker-gvn/src/Main.java create mode 100644 test/456-baseline-array-set/expected.txt create mode 100644 test/456-baseline-array-set/info.txt create mode 100644 test/456-baseline-array-set/src/Main.java create mode 100644 test/457-regs/expected.txt create mode 100644 test/457-regs/info.txt create mode 100644 
test/457-regs/regs_jni.cc create mode 100644 test/457-regs/smali/PhiLiveness.smali create mode 100644 test/457-regs/src/Main.java create mode 100644 test/458-checker-instruct-simplification/expected.txt create mode 100644 test/458-checker-instruct-simplification/info.txt create mode 100644 test/458-checker-instruct-simplification/smali/SmaliTests.smali create mode 100644 test/458-checker-instruct-simplification/smali/SmaliTests2.smali create mode 100644 test/458-checker-instruct-simplification/src/Main.java create mode 100644 test/458-long-to-fpu/expected.txt create mode 100644 test/458-long-to-fpu/info.txt create mode 100644 test/458-long-to-fpu/src/Main.java create mode 100644 test/459-dead-phi/expected.txt create mode 100644 test/459-dead-phi/info.txt create mode 100644 test/459-dead-phi/smali/EquivalentPhi.smali create mode 100644 test/459-dead-phi/src/Main.java create mode 100644 test/460-multiple-returns3/expected.txt create mode 100644 test/460-multiple-returns3/info.txt create mode 100644 test/460-multiple-returns3/smali/MultipleReturns.smali create mode 100644 test/460-multiple-returns3/src/Main.java create mode 100644 test/461-get-reference-vreg/expected.txt create mode 100644 test/461-get-reference-vreg/get_reference_vreg_jni.cc create mode 100644 test/461-get-reference-vreg/info.txt create mode 100644 test/461-get-reference-vreg/src/Main.java create mode 100644 test/462-checker-inlining-dex-files/expected.txt create mode 100644 test/462-checker-inlining-dex-files/info.txt create mode 100644 test/462-checker-inlining-dex-files/src-multidex/OtherDex.java create mode 100644 test/462-checker-inlining-dex-files/src/Main.java create mode 100644 test/463-checker-boolean-simplifier/expected.txt create mode 100644 test/463-checker-boolean-simplifier/info.txt create mode 100644 test/463-checker-boolean-simplifier/smali/Main2.smali create mode 100644 test/463-checker-boolean-simplifier/src-art/Main.java create mode 100644 
test/463-checker-boolean-simplifier/src/Main.java create mode 100644 test/464-checker-inline-sharpen-calls/expected.txt create mode 100644 test/464-checker-inline-sharpen-calls/info.txt create mode 100644 test/464-checker-inline-sharpen-calls/src/Main.java create mode 100644 test/465-checker-clinit-gvn/expected.txt create mode 100644 test/465-checker-clinit-gvn/info.txt create mode 100644 test/465-checker-clinit-gvn/src/Main.java create mode 100644 test/466-get-live-vreg/expected.txt create mode 100644 test/466-get-live-vreg/get_live_vreg_jni.cc create mode 100644 test/466-get-live-vreg/info.txt create mode 100644 test/466-get-live-vreg/src/Main.java create mode 100644 test/467-regalloc-pair/expected.txt create mode 100644 test/467-regalloc-pair/info.txt create mode 100644 test/467-regalloc-pair/smali/TestCase.smali create mode 100644 test/467-regalloc-pair/src/Main.java create mode 100644 test/468-checker-bool-simplif-regression/expected.txt create mode 100644 test/468-checker-bool-simplif-regression/info.txt create mode 100644 test/468-checker-bool-simplif-regression/smali/TestCase.smali create mode 100644 test/468-checker-bool-simplif-regression/src/Main.java create mode 100644 test/469-condition-materialization/expected.txt create mode 100644 test/469-condition-materialization/info.txt create mode 100644 test/469-condition-materialization/src/Main.java create mode 100644 test/470-huge-method/expected.txt create mode 100644 test/470-huge-method/info.txt create mode 100644 test/470-huge-method/src/Main.java create mode 100644 test/471-deopt-environment/expected.txt create mode 100644 test/471-deopt-environment/info.txt create mode 100644 test/471-deopt-environment/src/Main.java create mode 100644 test/471-uninitialized-locals/expected.txt create mode 100644 test/471-uninitialized-locals/info.txt create mode 100644 test/471-uninitialized-locals/smali/Test.smali create mode 100644 test/471-uninitialized-locals/src/Main.java create mode 100644 
test/472-type-propagation/expected.txt create mode 100644 test/472-type-propagation/info.txt create mode 100644 test/472-type-propagation/src/Main.java create mode 100644 test/472-unreachable-if-regression/expected.txt create mode 100644 test/472-unreachable-if-regression/info.txt create mode 100644 test/472-unreachable-if-regression/smali/Test.smali create mode 100644 test/472-unreachable-if-regression/src/Main.java create mode 100644 test/473-checker-inliner-constants/expected.txt create mode 100644 test/473-checker-inliner-constants/info.txt create mode 100644 test/473-checker-inliner-constants/src/Main.java create mode 100644 test/473-remove-dead-block/expected.txt create mode 100644 test/473-remove-dead-block/info.txt create mode 100644 test/473-remove-dead-block/src/Main.java create mode 100644 test/474-checker-boolean-input/expected.txt create mode 100644 test/474-checker-boolean-input/info.txt create mode 100644 test/474-checker-boolean-input/src/Main.java create mode 100644 test/474-fp-sub-neg/expected.txt create mode 100644 test/474-fp-sub-neg/info.txt create mode 100644 test/474-fp-sub-neg/src/Main.java create mode 100644 test/475-regression-inliner-ids/expected.txt create mode 100644 test/475-regression-inliner-ids/info.txt create mode 100644 test/475-regression-inliner-ids/smali/TestCase.smali create mode 100644 test/475-regression-inliner-ids/src/Main.java create mode 100644 test/475-simplify-mul-zero/expected.txt create mode 100644 test/475-simplify-mul-zero/info.txt create mode 100644 test/475-simplify-mul-zero/src/Main.java create mode 100644 test/476-checker-ctor-fence-redun-elim/expected.txt create mode 100644 test/476-checker-ctor-fence-redun-elim/info.txt create mode 100644 test/476-checker-ctor-fence-redun-elim/src/Main.java create mode 100644 test/476-checker-ctor-memory-barrier/expected.txt create mode 100644 test/476-checker-ctor-memory-barrier/info.txt create mode 100644 test/476-checker-ctor-memory-barrier/src/Main.java create mode 100644 
test/476-clinit-inline-static-invoke/expected.txt create mode 100644 test/476-clinit-inline-static-invoke/info.txt create mode 100644 test/476-clinit-inline-static-invoke/src/Main.java create mode 100644 test/477-checker-bound-type/expected.txt create mode 100644 test/477-checker-bound-type/info.txt create mode 100644 test/477-checker-bound-type/src/Main.java create mode 100644 test/477-long-2-float-convers-precision/expected.txt create mode 100644 test/477-long-2-float-convers-precision/info.txt create mode 100644 test/477-long-2-float-convers-precision/src/Main.java create mode 100644 test/478-checker-clinit-check-pruning/expected.txt create mode 100644 test/478-checker-clinit-check-pruning/info.txt create mode 100644 test/478-checker-clinit-check-pruning/src/Main.java create mode 100644 test/478-checker-inline-noreturn/expected.txt create mode 100644 test/478-checker-inline-noreturn/info.txt create mode 100644 test/478-checker-inline-noreturn/src/Main.java create mode 100644 test/478-checker-inliner-nested-loop/expected.txt create mode 100644 test/478-checker-inliner-nested-loop/info.txt create mode 100644 test/478-checker-inliner-nested-loop/src/Main.java create mode 100644 test/479-regression-implicit-null-check/expected.txt create mode 100644 test/479-regression-implicit-null-check/info.txt create mode 100644 test/479-regression-implicit-null-check/src/Main.java create mode 100644 test/480-checker-dead-blocks/expected.txt create mode 100644 test/480-checker-dead-blocks/info.txt create mode 100644 test/480-checker-dead-blocks/smali/Smali.smali create mode 100644 test/480-checker-dead-blocks/src/Main.java create mode 100644 test/481-regression-phi-cond/expected.txt create mode 100644 test/481-regression-phi-cond/info.txt create mode 100644 test/481-regression-phi-cond/src/Main.java create mode 100644 test/482-checker-loop-back-edge-use/expected.txt create mode 100644 test/482-checker-loop-back-edge-use/info.txt create mode 100644 
test/482-checker-loop-back-edge-use/src/Main.java create mode 100644 test/483-dce-block/expected.txt create mode 100644 test/483-dce-block/info.txt create mode 100644 test/483-dce-block/src/Main.java create mode 100644 test/484-checker-register-hints/expected.txt create mode 100644 test/484-checker-register-hints/info.txt create mode 100644 test/484-checker-register-hints/smali/Smali.smali create mode 100644 test/484-checker-register-hints/src/Main.java create mode 100644 test/485-checker-dce-loop-update/expected.txt create mode 100644 test/485-checker-dce-loop-update/info.txt create mode 100644 test/485-checker-dce-loop-update/smali/TestCase.smali create mode 100644 test/485-checker-dce-loop-update/src/Main.java create mode 100644 test/485-checker-dce-switch/expected.txt create mode 100644 test/485-checker-dce-switch/info.txt create mode 100644 test/485-checker-dce-switch/src/Main.java create mode 100644 test/486-checker-must-do-null-check/expected.txt create mode 100644 test/486-checker-must-do-null-check/info.txt create mode 100644 test/486-checker-must-do-null-check/src/Main.java create mode 100644 test/487-checker-inline-calls/expected.txt create mode 100644 test/487-checker-inline-calls/info.txt create mode 100644 test/487-checker-inline-calls/src/Main.java create mode 100644 test/488-checker-inline-recursive-calls/expected.txt create mode 100644 test/488-checker-inline-recursive-calls/info.txt create mode 100644 test/488-checker-inline-recursive-calls/src/Main.java create mode 100644 test/489-current-method-regression/expected.txt create mode 100644 test/489-current-method-regression/info.txt create mode 100644 test/489-current-method-regression/src/Main.java create mode 100644 test/490-checker-inline/expected.txt create mode 100644 test/490-checker-inline/info.txt create mode 100644 test/490-checker-inline/src/Main.java create mode 100644 test/491-current-method/expected.txt create mode 100644 test/491-current-method/info.txt create mode 100644 
test/491-current-method/src/Main.java create mode 100644 test/492-checker-inline-invoke-interface/expected.txt create mode 100644 test/492-checker-inline-invoke-interface/info.txt create mode 100644 test/492-checker-inline-invoke-interface/src/Main.java create mode 100644 test/493-checker-inline-invoke-interface/expected.txt create mode 100644 test/493-checker-inline-invoke-interface/info.txt create mode 100644 test/493-checker-inline-invoke-interface/src/Main.java create mode 100644 test/494-checker-instanceof-tests/expected.txt create mode 100644 test/494-checker-instanceof-tests/info.txt create mode 100644 test/494-checker-instanceof-tests/src/Main.java create mode 100644 test/495-checker-checkcast-tests/expected.txt create mode 100644 test/495-checker-checkcast-tests/info.txt create mode 100644 test/495-checker-checkcast-tests/src/Main.java create mode 100644 test/496-checker-inlining-class-loader/expected.txt create mode 100644 test/496-checker-inlining-class-loader/info.txt create mode 100644 test/496-checker-inlining-class-loader/src/FirstSeenByMyClassLoader.java create mode 100644 test/496-checker-inlining-class-loader/src/Main.java create mode 100644 test/497-inlining-and-class-loader/clear_dex_cache.cc create mode 100644 test/497-inlining-and-class-loader/expected.txt create mode 100644 test/497-inlining-and-class-loader/info.txt create mode 100644 test/497-inlining-and-class-loader/src/Level1.java create mode 100644 test/497-inlining-and-class-loader/src/Main.java create mode 100644 test/498-type-propagation/expected.txt create mode 100644 test/498-type-propagation/info.txt create mode 100644 test/498-type-propagation/smali/TypePropagation.smali create mode 100644 test/498-type-propagation/src/Main.java create mode 100644 test/499-bce-phi-array-length/expected.txt create mode 100644 test/499-bce-phi-array-length/info.txt create mode 100644 test/499-bce-phi-array-length/src/Main.java create mode 100644 test/500-instanceof/expected.txt create mode 100644 
test/500-instanceof/info.txt create mode 100644 test/500-instanceof/src/Main.java create mode 100644 test/501-null-constant-dce/expected.txt create mode 100644 test/501-null-constant-dce/info.txt create mode 100644 test/501-null-constant-dce/smali/DCE.smali create mode 100644 test/501-null-constant-dce/src/Main.java create mode 100644 test/501-regression-packed-switch/expected.txt create mode 100644 test/501-regression-packed-switch/info.txt create mode 100644 test/501-regression-packed-switch/smali/Test.smali create mode 100644 test/501-regression-packed-switch/src/Main.java create mode 100644 test/503-dead-instructions/expected.txt create mode 100644 test/503-dead-instructions/info.txt create mode 100644 test/503-dead-instructions/smali/DeadInstructions.smali create mode 100644 test/503-dead-instructions/src/Main.java create mode 100644 test/504-regression-baseline-entry/expected.txt create mode 100644 test/504-regression-baseline-entry/info.txt create mode 100644 test/504-regression-baseline-entry/smali/Test.smali create mode 100644 test/504-regression-baseline-entry/src/Main.java create mode 100644 test/505-simplifier-type-propagation/expected.txt create mode 100644 test/505-simplifier-type-propagation/info.txt create mode 100644 test/505-simplifier-type-propagation/src/Main.java create mode 100644 test/506-verify-aput/expected.txt create mode 100644 test/506-verify-aput/info.txt create mode 100644 test/506-verify-aput/smali/VerifyAPut1.smali create mode 100644 test/506-verify-aput/smali/VerifyAPut2.smali create mode 100644 test/506-verify-aput/src/Main.java create mode 100644 test/507-boolean-test/expected.txt create mode 100644 test/507-boolean-test/info.txt create mode 100644 test/507-boolean-test/src/Main.java create mode 100644 test/507-referrer/expected.txt create mode 100644 test/507-referrer/info.txt create mode 100644 test/507-referrer/src/Main.java create mode 100644 test/507-referrer/src/p1/InPackage.java create mode 100644 
test/508-checker-disassembly/expected.txt create mode 100644 test/508-checker-disassembly/info.txt create mode 100644 test/508-checker-disassembly/src/Main.java create mode 100644 test/508-referrer-method/expected.txt create mode 100644 test/508-referrer-method/info.txt create mode 100644 test/508-referrer-method/src/Main.java create mode 100644 test/508-referrer-method/src/p1/InPackage.java create mode 100644 test/508-referrer-method/src/p1/PackagePrivateA.java create mode 100644 test/508-referrer-method/src/p1/PublicB.java create mode 100644 test/508-referrer-method/src/p1/PublicC.java create mode 100644 test/509-pre-header/expected.txt create mode 100644 test/509-pre-header/info.txt create mode 100644 test/509-pre-header/smali/PreHeader.smali create mode 100644 test/509-pre-header/src/Main.java create mode 100644 test/510-checker-try-catch/expected.txt create mode 100644 test/510-checker-try-catch/info.txt create mode 100644 test/510-checker-try-catch/smali/Builder.smali create mode 100644 test/510-checker-try-catch/smali/RegisterAllocator.smali create mode 100644 test/510-checker-try-catch/smali/Runtime.smali create mode 100644 test/510-checker-try-catch/smali/SsaBuilder.smali create mode 100644 test/510-checker-try-catch/src/Main.java create mode 100644 test/511-clinit-interface/expected.txt create mode 100644 test/511-clinit-interface/info.txt create mode 100644 test/511-clinit-interface/smali/BogusInterface.smali create mode 100644 test/511-clinit-interface/src/Main.java create mode 100644 test/513-array-deopt/expected.txt create mode 100644 test/513-array-deopt/info.txt create mode 100644 test/513-array-deopt/src/Main.java create mode 100644 test/514-shifts/expected.txt create mode 100644 test/514-shifts/info.txt create mode 100644 test/514-shifts/src/Main.java create mode 100644 test/515-dce-dominator/expected.txt create mode 100644 test/515-dce-dominator/info.txt create mode 100644 test/515-dce-dominator/smali/Dominator.smali create mode 100644 
test/515-dce-dominator/src/Main.java create mode 100644 test/516-dead-move-result/expected.txt create mode 100644 test/516-dead-move-result/info.txt create mode 100644 test/516-dead-move-result/smali/MoveResult.smali create mode 100644 test/516-dead-move-result/src/Main.java create mode 100644 test/517-checker-builder-fallthrough/expected.txt create mode 100644 test/517-checker-builder-fallthrough/info.txt create mode 100644 test/517-checker-builder-fallthrough/smali/TestCase.smali create mode 100644 test/517-checker-builder-fallthrough/src/Main.java create mode 100644 test/518-null-array-get/expected.txt create mode 100644 test/518-null-array-get/info.txt create mode 100644 test/518-null-array-get/smali/NullArrayFailInt2Object.smali create mode 100644 test/518-null-array-get/smali/NullArrayFailObject2Int.smali create mode 100644 test/518-null-array-get/smali/NullArraySuccessInt.smali create mode 100644 test/518-null-array-get/smali/NullArraySuccessInt2Float.smali create mode 100644 test/518-null-array-get/smali/NullArraySuccessRef.smali create mode 100644 test/518-null-array-get/smali/NullArraySuccessShort.smali create mode 100644 test/518-null-array-get/src/Main.java create mode 100644 test/519-bound-load-class/expected.txt create mode 100644 test/519-bound-load-class/info.txt create mode 100644 test/519-bound-load-class/src/Main.java create mode 100644 test/520-equivalent-phi/expected.txt create mode 100644 test/520-equivalent-phi/info.txt create mode 100644 test/520-equivalent-phi/smali/Equivalent.smali create mode 100644 test/520-equivalent-phi/src/Main.java create mode 100644 test/521-checker-array-set-null/expected.txt create mode 100644 test/521-checker-array-set-null/info.txt create mode 100644 test/521-checker-array-set-null/src/Main.java create mode 100644 test/521-regression-integer-field-set/expected.txt create mode 100644 test/521-regression-integer-field-set/info.txt create mode 100644 test/521-regression-integer-field-set/src/Main.java create mode 
100644 test/522-checker-regression-monitor-exit/expected.txt create mode 100644 test/522-checker-regression-monitor-exit/info.txt create mode 100644 test/522-checker-regression-monitor-exit/smali/Test.smali create mode 100644 test/522-checker-regression-monitor-exit/src/Main.java create mode 100644 test/523-checker-can-throw-regression/expected.txt create mode 100644 test/523-checker-can-throw-regression/info.txt create mode 100644 test/523-checker-can-throw-regression/smali/Test.smali create mode 100644 test/523-checker-can-throw-regression/src/Main.java create mode 100644 test/524-boolean-simplifier-regression/expected.txt create mode 100644 test/524-boolean-simplifier-regression/info.txt create mode 100644 test/524-boolean-simplifier-regression/src/Main.java create mode 100644 test/525-checker-arrays-fields1/expected.txt create mode 100644 test/525-checker-arrays-fields1/info.txt create mode 100644 test/525-checker-arrays-fields1/src/Main.java create mode 100644 test/525-checker-arrays-fields2/expected.txt create mode 100644 test/525-checker-arrays-fields2/info.txt create mode 100644 test/525-checker-arrays-fields2/src/Main.java create mode 100644 test/526-checker-caller-callee-regs/expected.txt create mode 100644 test/526-checker-caller-callee-regs/info.txt create mode 100644 test/526-checker-caller-callee-regs/src/Main.java create mode 100644 test/526-long-regalloc/expected.txt create mode 100644 test/526-long-regalloc/info.txt create mode 100644 test/526-long-regalloc/src/Main.java create mode 100644 test/527-checker-array-access-simd/expected.txt create mode 100644 test/527-checker-array-access-simd/info.txt create mode 100644 test/527-checker-array-access-simd/src/Main.java create mode 100644 test/527-checker-array-access-split/expected.txt create mode 100644 test/527-checker-array-access-split/info.txt create mode 100644 test/527-checker-array-access-split/src/Main.java create mode 100644 test/528-long-hint/expected.txt create mode 100644 
test/528-long-hint/info.txt create mode 100644 test/528-long-hint/src/Main.java create mode 100644 test/529-checker-unresolved/expected.txt create mode 100644 test/529-checker-unresolved/info.txt create mode 100644 test/529-checker-unresolved/run create mode 100644 test/529-checker-unresolved/src-dex2oat-unresolved/UnresolvedClass.java create mode 100644 test/529-checker-unresolved/src-dex2oat-unresolved/UnresolvedInterface.java create mode 100644 test/529-checker-unresolved/src-dex2oat-unresolved/UnresolvedSuperClass.java create mode 100644 test/529-checker-unresolved/src/Main.java create mode 100644 test/529-long-split/expected.txt create mode 100644 test/529-long-split/info.txt create mode 100644 test/529-long-split/src/Main.java create mode 100644 test/530-checker-loops1/expected.txt create mode 100644 test/530-checker-loops1/info.txt create mode 100644 test/530-checker-loops1/src/Main.java create mode 100644 test/530-checker-loops2/expected.txt create mode 100644 test/530-checker-loops2/info.txt create mode 100644 test/530-checker-loops2/src/Main.java create mode 100644 test/530-checker-loops3/expected.txt create mode 100644 test/530-checker-loops3/info.txt create mode 100644 test/530-checker-loops3/src/Main.java create mode 100644 test/530-checker-loops4/expected.txt create mode 100644 test/530-checker-loops4/info.txt create mode 100644 test/530-checker-loops4/src/Main.java create mode 100644 test/530-checker-loops5/expected.txt create mode 100644 test/530-checker-loops5/info.txt create mode 100644 test/530-checker-loops5/src/Main.java create mode 100644 test/530-checker-lse-ctor-fences/expected.txt create mode 100644 test/530-checker-lse-ctor-fences/info.txt create mode 100644 test/530-checker-lse-ctor-fences/smali/Smali.smali create mode 100644 test/530-checker-lse-ctor-fences/src/Main.java create mode 100644 test/530-checker-lse-simd/expected.txt create mode 100644 test/530-checker-lse-simd/info.txt create mode 100644 
test/530-checker-lse-simd/src/Main.java create mode 100644 test/530-checker-lse/expected.txt create mode 100644 test/530-checker-lse/info.txt create mode 100644 test/530-checker-lse/smali/Main.smali create mode 100644 test/530-checker-lse/src/Main.java create mode 100644 test/530-checker-lse2/expected.txt create mode 100644 test/530-checker-lse2/info.txt create mode 100644 test/530-checker-lse2/src/Main.java create mode 100644 test/530-checker-lse3/expected.txt create mode 100644 test/530-checker-lse3/info.txt create mode 100644 test/530-checker-lse3/smali/StoreLoad.smali create mode 100644 test/530-checker-lse3/src/Main.java create mode 100644 test/530-checker-peel-unroll/expected.txt create mode 100644 test/530-checker-peel-unroll/info.txt create mode 100644 test/530-checker-peel-unroll/smali/PeelUnroll.smali create mode 100644 test/530-checker-peel-unroll/src/Main.java create mode 100644 test/530-checker-regression-reftyp-final/expected.txt create mode 100644 test/530-checker-regression-reftyp-final/info.txt create mode 100644 test/530-checker-regression-reftyp-final/smali/TestCase.smali create mode 100644 test/530-checker-regression-reftyp-final/src/Main.java create mode 100644 test/530-instanceof-checkcast/expected.txt create mode 100644 test/530-instanceof-checkcast/info.txt create mode 100644 test/530-instanceof-checkcast/src/Main.java create mode 100644 test/530-regression-lse/expected.txt create mode 100644 test/530-regression-lse/info.txt create mode 100644 test/530-regression-lse/src/Main.java create mode 100644 test/531-regression-debugphi/expected.txt create mode 100644 test/531-regression-debugphi/info.txt create mode 100644 test/531-regression-debugphi/smali/TestCase.smali create mode 100644 test/531-regression-debugphi/src/Main.java create mode 100644 test/532-checker-nonnull-arrayset/expected.txt create mode 100644 test/532-checker-nonnull-arrayset/info.txt create mode 100644 test/532-checker-nonnull-arrayset/src/Main.java create mode 100644 
test/533-regression-debugphi/expected.txt create mode 100644 test/533-regression-debugphi/info.txt create mode 100644 test/533-regression-debugphi/smali/TestCase.smali create mode 100644 test/533-regression-debugphi/src/Main.java create mode 100644 test/534-checker-bce-deoptimization/expected.txt create mode 100644 test/534-checker-bce-deoptimization/info.txt create mode 100644 test/534-checker-bce-deoptimization/src/Main.java create mode 100644 test/535-deopt-and-inlining/expected.txt create mode 100644 test/535-deopt-and-inlining/info.txt create mode 100644 test/535-deopt-and-inlining/src/Main.java create mode 100644 test/535-regression-const-val/expected.txt create mode 100644 test/535-regression-const-val/info.txt create mode 100644 test/535-regression-const-val/smali/TestCase.smali create mode 100644 test/535-regression-const-val/src/Main.java create mode 100644 test/536-checker-intrinsic-optimization/expected.txt create mode 100644 test/536-checker-intrinsic-optimization/info.txt create mode 100644 test/536-checker-intrinsic-optimization/smali/SmaliTests.smali create mode 100644 test/536-checker-intrinsic-optimization/src/Main.java create mode 100644 test/536-checker-needs-access-check/expected.txt create mode 100644 test/536-checker-needs-access-check/info.txt create mode 100644 test/536-checker-needs-access-check/src/Main.java create mode 100644 test/536-checker-needs-access-check/src/other/InaccessibleClass.java create mode 100644 test/536-checker-needs-access-check/src/other/InaccessibleClassProxy.java create mode 100644 test/536-checker-needs-access-check/src2/other/InaccessibleClass.java create mode 100644 test/536-checker-needs-access-check/src2/other/InaccessibleClassProxy.java create mode 100644 test/537-checker-arraycopy/expected.txt create mode 100644 test/537-checker-arraycopy/info.txt create mode 100644 test/537-checker-arraycopy/src/Main.java create mode 100644 test/537-checker-debuggable/expected.txt create mode 100644 
test/537-checker-debuggable/info.txt create mode 100644 test/537-checker-debuggable/smali/TestCase.smali create mode 100644 test/537-checker-debuggable/src/Main.java create mode 100644 test/537-checker-inline-and-unverified/expected.txt create mode 100644 test/537-checker-inline-and-unverified/info.txt create mode 100644 test/537-checker-inline-and-unverified/src/Main.java create mode 100644 test/537-checker-inline-and-unverified/src/other/InaccessibleClass.java create mode 100644 test/537-checker-inline-and-unverified/src2/other/InaccessibleClass.java create mode 100644 test/537-checker-jump-over-jump/expected.txt create mode 100644 test/537-checker-jump-over-jump/info.txt create mode 100644 test/537-checker-jump-over-jump/src/Main.java create mode 100644 test/538-checker-embed-constants/expected.txt create mode 100644 test/538-checker-embed-constants/info.txt create mode 100644 test/538-checker-embed-constants/src/Main.java create mode 100644 test/540-checker-rtp-bug/expected.txt create mode 100644 test/540-checker-rtp-bug/info.txt create mode 100644 test/540-checker-rtp-bug/src/Main.java create mode 100644 test/541-regression-inlined-deopt/expected.txt create mode 100644 test/541-regression-inlined-deopt/info.txt create mode 100644 test/541-regression-inlined-deopt/smali/TestCase.smali create mode 100644 test/541-regression-inlined-deopt/src/Main.java create mode 100644 test/542-bitfield-rotates/expected.txt create mode 100644 test/542-bitfield-rotates/info.txt create mode 100644 test/542-bitfield-rotates/src/Main.java create mode 100644 test/542-inline-trycatch/expected.txt create mode 100644 test/542-inline-trycatch/info.txt create mode 100644 test/542-inline-trycatch/src/Main.java create mode 100644 test/542-unresolved-access-check/expected.txt create mode 100644 test/542-unresolved-access-check/info.txt create mode 100644 test/542-unresolved-access-check/src/Main.java create mode 100644 test/542-unresolved-access-check/src/p1/InP1.java create mode 100644 
test/542-unresolved-access-check/src/p1/OtherInP1.java create mode 100644 test/542-unresolved-access-check/src/p1/PlaceHolder.java create mode 100644 test/543-checker-dce-trycatch/expected.txt create mode 100644 test/543-checker-dce-trycatch/info.txt create mode 100644 test/543-checker-dce-trycatch/smali/TestCase.smali create mode 100644 test/543-checker-dce-trycatch/src/Main.java create mode 100644 test/543-env-long-ref/env_long_ref.cc create mode 100644 test/543-env-long-ref/expected.txt create mode 100644 test/543-env-long-ref/info.txt create mode 100644 test/543-env-long-ref/smali/TestCase.smali create mode 100644 test/543-env-long-ref/src/Main.java create mode 100644 test/545-tracing-and-jit/expected.txt create mode 100644 test/545-tracing-and-jit/info.txt create mode 100644 test/545-tracing-and-jit/src/Main.java create mode 100644 test/546-regression-simplify-catch/expected.txt create mode 100644 test/546-regression-simplify-catch/info.txt create mode 100644 test/546-regression-simplify-catch/smali/TestCase.smali create mode 100644 test/546-regression-simplify-catch/src/Main.java create mode 100644 test/547-regression-trycatch-critic-edge/expected.txt create mode 100644 test/547-regression-trycatch-critic-edge/info.txt create mode 100644 test/547-regression-trycatch-critic-edge/smali/TestCase.smali create mode 100644 test/547-regression-trycatch-critic-edge/src/Main.java create mode 100644 test/548-checker-inlining-and-dce/expected.txt create mode 100644 test/548-checker-inlining-and-dce/info.txt create mode 100644 test/548-checker-inlining-and-dce/src/Main.java create mode 100644 test/549-checker-types-merge/expected.txt create mode 100644 test/549-checker-types-merge/info.txt create mode 100644 test/549-checker-types-merge/src/Main.java create mode 100644 test/550-checker-multiply-accumulate/expected.txt create mode 100644 test/550-checker-multiply-accumulate/info.txt create mode 100644 test/550-checker-multiply-accumulate/src/Main.java create mode 100644 
test/550-checker-regression-wide-store/expected.txt create mode 100644 test/550-checker-regression-wide-store/info.txt create mode 100644 test/550-checker-regression-wide-store/smali/TestCase.smali create mode 100644 test/550-checker-regression-wide-store/src/Main.java create mode 100644 test/550-new-instance-clinit/expected.txt create mode 100644 test/550-new-instance-clinit/info.txt create mode 100644 test/550-new-instance-clinit/src/Main.java create mode 100644 test/551-checker-clinit/expected.txt create mode 100644 test/551-checker-clinit/info.txt create mode 100644 test/551-checker-clinit/src/Main.java create mode 100644 test/551-checker-shifter-operand/expected.txt create mode 100644 test/551-checker-shifter-operand/info.txt create mode 100644 test/551-checker-shifter-operand/src/Main.java create mode 100644 test/551-implicit-null-checks/expected.txt create mode 100644 test/551-implicit-null-checks/info.txt create mode 100644 test/551-implicit-null-checks/src/Main.java create mode 100644 test/551-invoke-super/expected.txt create mode 100644 test/551-invoke-super/info.txt create mode 100644 test/551-invoke-super/smali/invokesuper.smali create mode 100644 test/551-invoke-super/smali/superclass.smali create mode 100644 test/551-invoke-super/src/Main.java create mode 100644 test/552-checker-primitive-typeprop/expected.txt create mode 100644 test/552-checker-primitive-typeprop/info.txt create mode 100644 test/552-checker-primitive-typeprop/smali/ArrayGet.smali create mode 100644 test/552-checker-primitive-typeprop/smali/ArraySet.smali create mode 100644 test/552-checker-primitive-typeprop/smali/SsaBuilder.smali create mode 100644 test/552-checker-primitive-typeprop/smali/TypePropagation.smali create mode 100644 test/552-checker-primitive-typeprop/src/Main.java create mode 100644 test/552-checker-sharpening/expected.txt create mode 100644 test/552-checker-sharpening/info.txt create mode 100644 test/552-checker-sharpening/src/Main.java create mode 100644 
test/552-checker-x86-avx2-bit-manipulation/expected.txt create mode 100644 test/552-checker-x86-avx2-bit-manipulation/info.txt create mode 100644 test/552-checker-x86-avx2-bit-manipulation/src/Main.java create mode 100644 test/552-invoke-non-existent-super/expected.txt create mode 100644 test/552-invoke-non-existent-super/info.txt create mode 100644 test/552-invoke-non-existent-super/smali/invokesuper.smali create mode 100644 test/552-invoke-non-existent-super/smali/superclass.smali create mode 100644 test/552-invoke-non-existent-super/src/Main.java create mode 100644 test/553-invoke-super/expected.txt create mode 100644 test/553-invoke-super/info.txt create mode 100644 test/553-invoke-super/smali/invokesuper.smali create mode 100644 test/553-invoke-super/src/Main.java create mode 100644 test/553-invoke-super/src/SuperClass.java create mode 100644 test/554-checker-rtp-checkcast/expected.txt create mode 100644 test/554-checker-rtp-checkcast/info.txt create mode 100644 test/554-checker-rtp-checkcast/src/Main.java create mode 100644 test/555-UnsafeGetLong-regression/expected.txt create mode 100644 test/555-UnsafeGetLong-regression/info.txt create mode 100644 test/555-UnsafeGetLong-regression/src/Main.java create mode 100644 test/556-invoke-super/expected.txt create mode 100644 test/556-invoke-super/info.txt create mode 100644 test/556-invoke-super/smali/invokesuper.smali create mode 100644 test/556-invoke-super/src-multidex/SuperClass.java create mode 100644 test/556-invoke-super/src/Main.java create mode 100644 test/557-checker-instruct-simplifier-ror/expected.txt create mode 100644 test/557-checker-instruct-simplifier-ror/info.txt create mode 100644 test/557-checker-instruct-simplifier-ror/src/Main.java create mode 100644 test/557-checker-ref-equivalent/expected.txt create mode 100644 test/557-checker-ref-equivalent/info.txt create mode 100644 test/557-checker-ref-equivalent/smali/TestCase.smali create mode 100644 test/557-checker-ref-equivalent/src/Main.java create 
mode 100644 test/558-switch/expected.txt create mode 100644 test/558-switch/info.txt create mode 100644 test/558-switch/src/Main.java create mode 100644 test/559-bce-ssa/expected.txt create mode 100644 test/559-bce-ssa/info.txt create mode 100644 test/559-bce-ssa/src/Main.java create mode 100644 test/559-checker-irreducible-loop/expected.txt create mode 100644 test/559-checker-irreducible-loop/info.txt create mode 100644 test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali create mode 100644 test/559-checker-irreducible-loop/src/Main.java create mode 100644 test/559-checker-rtp-ifnotnull/expected.txt create mode 100644 test/559-checker-rtp-ifnotnull/info.txt create mode 100644 test/559-checker-rtp-ifnotnull/src/Main.java create mode 100644 test/560-packed-switch/expected.txt create mode 100644 test/560-packed-switch/info.txt create mode 100644 test/560-packed-switch/src/Main.java create mode 100644 test/561-divrem/expected.txt create mode 100644 test/561-divrem/info.txt create mode 100644 test/561-divrem/src/Main.java create mode 100644 test/561-shared-slowpaths/expected.txt create mode 100644 test/561-shared-slowpaths/info.txt create mode 100644 test/561-shared-slowpaths/src/Main.java create mode 100644 test/562-bce-preheader/expected.txt create mode 100644 test/562-bce-preheader/info.txt create mode 100644 test/562-bce-preheader/src/Main.java create mode 100644 test/562-checker-no-intermediate/expected.txt create mode 100644 test/562-checker-no-intermediate/info.txt create mode 100644 test/562-checker-no-intermediate/src/Main.java create mode 100644 test/563-checker-fakestring/expected.txt create mode 100644 test/563-checker-fakestring/info.txt create mode 100644 test/563-checker-fakestring/smali/TestCase.smali create mode 100644 test/563-checker-fakestring/src/Main.java create mode 100644 test/563-checker-invoke-super/expected.txt create mode 100644 test/563-checker-invoke-super/info.txt create mode 100644 test/563-checker-invoke-super/src/Main.java 
create mode 100644 test/564-checker-bitcount/expected.txt create mode 100644 test/564-checker-bitcount/info.txt create mode 100644 test/564-checker-bitcount/src/Main.java create mode 100644 test/564-checker-inline-loop/expected.txt create mode 100644 test/564-checker-inline-loop/info.txt create mode 100644 test/564-checker-inline-loop/src/Main.java create mode 100644 test/564-checker-irreducible-loop/expected.txt create mode 100644 test/564-checker-irreducible-loop/info.txt create mode 100644 test/564-checker-irreducible-loop/smali/IrreducibleLoop.smali create mode 100644 test/564-checker-irreducible-loop/src/Main.java create mode 100644 test/564-checker-negbitwise/expected.txt create mode 100644 test/564-checker-negbitwise/info.txt create mode 100644 test/564-checker-negbitwise/src/Main.java create mode 100644 test/565-checker-condition-liveness/expected.txt create mode 100644 test/565-checker-condition-liveness/info.txt create mode 100644 test/565-checker-condition-liveness/src/Main.java create mode 100644 test/565-checker-doublenegbitwise/expected.txt create mode 100644 test/565-checker-doublenegbitwise/info.txt create mode 100644 test/565-checker-doublenegbitwise/smali/SmaliTests.smali create mode 100644 test/565-checker-doublenegbitwise/src/Main.java create mode 100644 test/565-checker-irreducible-loop/expected.txt create mode 100644 test/565-checker-irreducible-loop/info.txt create mode 100644 test/565-checker-irreducible-loop/smali/IrreducibleLoop.smali create mode 100644 test/565-checker-irreducible-loop/src/Main.java create mode 100644 test/565-checker-rotate/expected.txt create mode 100644 test/565-checker-rotate/info.txt create mode 100644 test/565-checker-rotate/smali/Main2.smali create mode 100644 test/565-checker-rotate/src-art/Main.java create mode 100644 test/565-checker-rotate/src/Main.java create mode 100644 test/566-checker-codegen-select/expected.txt create mode 100644 test/566-checker-codegen-select/info.txt create mode 100644 
test/566-checker-codegen-select/src/Main.java create mode 100644 test/566-checker-signum/expected.txt create mode 100644 test/566-checker-signum/info.txt create mode 100644 test/566-checker-signum/smali/Main2.smali create mode 100644 test/566-checker-signum/src-art/Main.java create mode 100644 test/566-checker-signum/src/Main.java create mode 100644 test/566-polymorphic-inlining/expected.txt create mode 100644 test/566-polymorphic-inlining/info.txt create mode 100644 test/566-polymorphic-inlining/polymorphic_inline.cc create mode 100644 test/566-polymorphic-inlining/run create mode 100644 test/566-polymorphic-inlining/src/Main.java create mode 100644 test/567-checker-compare/expected.txt create mode 100644 test/567-checker-compare/info.txt create mode 100644 test/567-checker-compare/smali/Smali.smali create mode 100644 test/567-checker-compare/src/Main.java create mode 100644 test/568-checker-onebit/expected.txt create mode 100644 test/568-checker-onebit/info.txt create mode 100644 test/568-checker-onebit/src/Main.java create mode 100644 test/569-checker-pattern-replacement/expected.txt create mode 100644 test/569-checker-pattern-replacement/info.txt create mode 100755 test/569-checker-pattern-replacement/run create mode 100644 test/569-checker-pattern-replacement/src-multidex/Base.java create mode 100644 test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java create mode 100644 test/569-checker-pattern-replacement/src-multidex/Derived.java create mode 100644 test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java create mode 100644 test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java create mode 100644 test/569-checker-pattern-replacement/src-multidex/Second.java create mode 100644 test/569-checker-pattern-replacement/src/BaseInMainDex.java create mode 100644 test/569-checker-pattern-replacement/src/Main.java create mode 100644 test/570-checker-osr-locals/expected.txt create mode 100644 
test/570-checker-osr-locals/info.txt create mode 100755 test/570-checker-osr-locals/run create mode 100644 test/570-checker-osr-locals/smali/WeirdLoop.smali create mode 100644 test/570-checker-osr-locals/src/Main.java create mode 100644 test/570-checker-osr/expected.txt create mode 100644 test/570-checker-osr/info.txt create mode 100644 test/570-checker-osr/osr.cc create mode 100755 test/570-checker-osr/run create mode 100644 test/570-checker-osr/smali/Osr.smali create mode 100644 test/570-checker-osr/src/DeoptimizationController.java create mode 100644 test/570-checker-osr/src/Main.java create mode 100644 test/570-checker-select/expected.txt create mode 100644 test/570-checker-select/info.txt create mode 100644 test/570-checker-select/src/Main.java create mode 100644 test/571-irreducible-loop/expected.txt create mode 100644 test/571-irreducible-loop/info.txt create mode 100644 test/571-irreducible-loop/smali/IrreducibleLoop.smali create mode 100644 test/571-irreducible-loop/src/Main.java create mode 100644 test/572-checker-array-get-regression/expected.txt create mode 100644 test/572-checker-array-get-regression/info.txt create mode 100644 test/572-checker-array-get-regression/src/Main.java create mode 100644 test/573-checker-checkcast-regression/expected.txt create mode 100644 test/573-checker-checkcast-regression/info.txt create mode 100644 test/573-checker-checkcast-regression/src/Main.java create mode 100644 test/574-irreducible-and-constant-area/expected.txt create mode 100644 test/574-irreducible-and-constant-area/info.txt create mode 100755 test/574-irreducible-and-constant-area/run create mode 100644 test/574-irreducible-and-constant-area/smali/IrreducibleLoop.smali create mode 100644 test/574-irreducible-and-constant-area/src/Main.java create mode 100644 test/575-checker-isnan/expected.txt create mode 100644 test/575-checker-isnan/info.txt create mode 100644 test/575-checker-isnan/src/Main.java create mode 100644 
test/575-checker-string-init-alias/expected.txt create mode 100644 test/575-checker-string-init-alias/info.txt create mode 100644 test/575-checker-string-init-alias/smali/TestCase.smali create mode 100644 test/575-checker-string-init-alias/src/Main.java create mode 100644 test/576-polymorphic-inlining/expected.txt create mode 100644 test/576-polymorphic-inlining/info.txt create mode 100644 test/576-polymorphic-inlining/src/Main.java create mode 100644 test/577-checker-fp2int/expected.txt create mode 100644 test/577-checker-fp2int/info.txt create mode 100644 test/577-checker-fp2int/src/Main.java create mode 100644 test/578-bce-visit/expected.txt create mode 100644 test/578-bce-visit/info.txt create mode 100644 test/578-bce-visit/src/Main.java create mode 100644 test/578-polymorphic-inlining/expected.txt create mode 100644 test/578-polymorphic-inlining/info.txt create mode 100644 test/578-polymorphic-inlining/src/Main.java create mode 100644 test/579-inline-infinite/expected.txt create mode 100644 test/579-inline-infinite/info.txt create mode 100644 test/579-inline-infinite/src/Main.java create mode 100644 test/580-checker-round/expected.txt create mode 100644 test/580-checker-round/info.txt create mode 100644 test/580-checker-round/src/Main.java create mode 100644 test/580-checker-string-fact-intrinsics/expected.txt create mode 100644 test/580-checker-string-fact-intrinsics/info.txt create mode 100644 test/580-checker-string-fact-intrinsics/src-art/Main.java create mode 100644 test/580-crc32/expected.txt create mode 100644 test/580-crc32/info.txt create mode 100644 test/580-crc32/src/Main.java create mode 100644 test/580-fp16/expected.txt create mode 100644 test/580-fp16/info.txt create mode 100644 test/580-fp16/src-art/Main.java create mode 100644 test/581-rtp/expected.txt create mode 100644 test/581-rtp/info.txt create mode 100644 test/581-rtp/src/Main.java create mode 100644 test/582-checker-bce-length/expected.txt create mode 100644 
test/582-checker-bce-length/info.txt create mode 100644 test/582-checker-bce-length/src/Main.java create mode 100644 test/583-checker-zero/expected.txt create mode 100644 test/583-checker-zero/info.txt create mode 100644 test/583-checker-zero/src/Main.java create mode 100644 test/584-checker-div-bool/expected.txt create mode 100644 test/584-checker-div-bool/info.txt create mode 100644 test/584-checker-div-bool/src/Main.java create mode 100644 test/585-inline-unresolved/expected.txt create mode 100644 test/585-inline-unresolved/info.txt create mode 100644 test/585-inline-unresolved/smali/TestCase.smali create mode 100644 test/585-inline-unresolved/src/Main.java create mode 100644 test/586-checker-null-array-get/expected.txt create mode 100644 test/586-checker-null-array-get/info.txt create mode 100644 test/586-checker-null-array-get/smali/SmaliTests.smali create mode 100644 test/586-checker-null-array-get/src/Main.java create mode 100644 test/587-inline-class-error/expected.txt create mode 100644 test/587-inline-class-error/info.txt create mode 100644 test/587-inline-class-error/smali/SuperVerifyError.smali create mode 100644 test/587-inline-class-error/smali/TestCase.smali create mode 100644 test/587-inline-class-error/smali/VerifyError.smali create mode 100644 test/587-inline-class-error/src/Main.java create mode 100644 test/588-checker-irreducib-lifetime-hole/expected.txt create mode 100644 test/588-checker-irreducib-lifetime-hole/info.txt create mode 100644 test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali create mode 100644 test/588-checker-irreducib-lifetime-hole/src/Main.java create mode 100644 test/589-super-imt/expected.txt create mode 100644 test/589-super-imt/info.txt create mode 100644 test/589-super-imt/src/Main.java create mode 100644 test/590-checker-arr-set-null-regression/expected.txt create mode 100644 test/590-checker-arr-set-null-regression/info.txt create mode 100644 test/590-checker-arr-set-null-regression/src/Main.java 
create mode 100644 test/590-infinite-loop-with-nop/expected.txt create mode 100644 test/590-infinite-loop-with-nop/info.txt create mode 100644 test/590-infinite-loop-with-nop/smali/TestCase.smali create mode 100644 test/590-infinite-loop-with-nop/src/Main.java create mode 100644 test/591-checker-regression-dead-loop/expected.txt create mode 100644 test/591-checker-regression-dead-loop/info.txt create mode 100644 test/591-checker-regression-dead-loop/src/Main.java create mode 100644 test/591-new-instance-string/expected.txt create mode 100644 test/591-new-instance-string/info.txt create mode 100644 test/591-new-instance-string/smali/new-instance.smali create mode 100644 test/591-new-instance-string/src/Main.java create mode 100644 test/592-checker-regression-bool-input/expected.txt create mode 100644 test/592-checker-regression-bool-input/info.txt create mode 100644 test/592-checker-regression-bool-input/smali/TestCase.smali create mode 100644 test/592-checker-regression-bool-input/src/Main.java create mode 100644 test/593-checker-boolean-2-integral-conv/expected.txt create mode 100644 test/593-checker-boolean-2-integral-conv/info.txt create mode 100644 test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali create mode 100644 test/593-checker-boolean-2-integral-conv/src/Main.java create mode 100644 test/593-checker-long-2-float-regression/expected.txt create mode 100644 test/593-checker-long-2-float-regression/info.txt create mode 100644 test/593-checker-long-2-float-regression/src/Main.java create mode 100644 test/593-checker-shift-and-simplifier/expected.txt create mode 100644 test/593-checker-shift-and-simplifier/info.txt create mode 100644 test/593-checker-shift-and-simplifier/smali/SmaliTests.smali create mode 100644 test/593-checker-shift-and-simplifier/src/Main.java create mode 100644 test/594-checker-array-alias/expected.txt create mode 100644 test/594-checker-array-alias/info.txt create mode 100644 test/594-checker-array-alias/src/Main.java create 
mode 100644 test/594-checker-irreducible-linorder/expected.txt create mode 100644 test/594-checker-irreducible-linorder/info.txt create mode 100644 test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali create mode 100644 test/594-checker-irreducible-linorder/src/Main.java create mode 100644 test/594-invoke-super/expected.txt create mode 100644 test/594-invoke-super/info.txt create mode 100644 test/594-invoke-super/smali/invoke-super.smali create mode 100644 test/594-invoke-super/src/Main.java create mode 100644 test/594-load-string-regression/expected.txt create mode 100644 test/594-load-string-regression/info.txt create mode 100644 test/594-load-string-regression/src/Main.java create mode 100644 test/595-error-class/expected.txt create mode 100644 test/595-error-class/info.txt create mode 100644 test/595-error-class/smali/error.smali create mode 100644 test/595-error-class/smali/merge.smali create mode 100644 test/595-error-class/smali/super.smali create mode 100644 test/595-error-class/src/Main.java create mode 100644 test/595-profile-saving/expected.txt create mode 100644 test/595-profile-saving/info.txt create mode 100644 test/595-profile-saving/profile-saving.cc create mode 100644 test/595-profile-saving/run create mode 100644 test/595-profile-saving/src/Main.java create mode 100644 test/596-app-images/app_images.cc create mode 100644 test/596-app-images/expected.txt create mode 100644 test/596-app-images/info.txt create mode 100644 test/596-app-images/src/Main.java create mode 100644 test/596-checker-dead-phi/expected.txt create mode 100644 test/596-checker-dead-phi/info.txt create mode 100644 test/596-checker-dead-phi/smali/IrreducibleLoop.smali create mode 100644 test/596-checker-dead-phi/src/Main.java create mode 100644 test/596-monitor-inflation/expected.txt create mode 100644 test/596-monitor-inflation/info.txt create mode 100644 test/596-monitor-inflation/monitor_inflation.cc create mode 100644 test/596-monitor-inflation/src-art/Main.java 
create mode 100644 test/597-deopt-busy-loop/expected.txt create mode 100644 test/597-deopt-busy-loop/info.txt create mode 100644 test/597-deopt-busy-loop/run create mode 100644 test/597-deopt-busy-loop/src/FloatLoop.java create mode 100644 test/597-deopt-busy-loop/src/Main.java create mode 100644 test/597-deopt-busy-loop/src/SimdLoop.java create mode 100644 test/597-deopt-busy-loop/src/SimpleLoop.java create mode 100644 test/597-deopt-invoke-stub/expected.txt create mode 100644 test/597-deopt-invoke-stub/info.txt create mode 100644 test/597-deopt-invoke-stub/run create mode 100644 test/597-deopt-invoke-stub/src/Main.java create mode 100644 test/597-deopt-new-string/deopt.cc create mode 100644 test/597-deopt-new-string/expected.txt create mode 100644 test/597-deopt-new-string/info.txt create mode 100644 test/597-deopt-new-string/run create mode 100644 test/597-deopt-new-string/src/Main.java create mode 100644 test/598-checker-irreducible-dominance/expected.txt create mode 100644 test/598-checker-irreducible-dominance/info.txt create mode 100644 test/598-checker-irreducible-dominance/smali/IrreducibleLoop.smali create mode 100644 test/598-checker-irreducible-dominance/src/Main.java create mode 100644 test/599-checker-irreducible-loop/expected.txt create mode 100644 test/599-checker-irreducible-loop/info.txt create mode 100644 test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali create mode 100644 test/599-checker-irreducible-loop/src/Main.java create mode 100644 test/600-verifier-fails/expected.txt create mode 100644 test/600-verifier-fails/info.txt create mode 100644 test/600-verifier-fails/smali/class.smali create mode 100644 test/600-verifier-fails/smali/construct.smali create mode 100644 test/600-verifier-fails/smali/iget.smali create mode 100644 test/600-verifier-fails/smali/invoke.smali create mode 100644 test/600-verifier-fails/smali/iput.smali create mode 100644 test/600-verifier-fails/smali/sput.smali create mode 100644 
test/600-verifier-fails/src/Main.java create mode 100644 test/601-method-access/expected.txt create mode 100644 test/601-method-access/info.txt create mode 100644 test/601-method-access/smali/SubClassUsingInaccessibleMethod.smali create mode 100644 test/601-method-access/src/Main.java create mode 100644 test/601-method-access/src/other/ProtectedClass.java create mode 100644 test/601-method-access/src/other/PublicClass.java create mode 100644 test/602-deoptimizeable/expected.txt create mode 100644 test/602-deoptimizeable/info.txt create mode 100644 test/602-deoptimizeable/src/Main.java create mode 100644 test/603-checker-instanceof/expected.txt create mode 100644 test/603-checker-instanceof/info.txt create mode 100644 test/603-checker-instanceof/src/Main.java create mode 100644 test/604-hot-static-interface/expected.txt create mode 100644 test/604-hot-static-interface/info.txt create mode 100644 test/604-hot-static-interface/src/Main.java create mode 100644 test/605-new-string-from-bytes/expected.txt create mode 100644 test/605-new-string-from-bytes/info.txt create mode 100644 test/605-new-string-from-bytes/src/Main.java create mode 100644 test/606-erroneous-class/expected.txt create mode 100644 test/606-erroneous-class/info.txt create mode 100644 test/606-erroneous-class/jasmin-multidex/ClassA.j create mode 100644 test/606-erroneous-class/smali/ClassB.smali create mode 100644 test/606-erroneous-class/smali/ErrClass.smali create mode 100644 test/606-erroneous-class/src/Main.java create mode 100644 test/607-daemon-stress/expected.txt create mode 100644 test/607-daemon-stress/info.txt create mode 100644 test/607-daemon-stress/src/Main.java create mode 100644 test/608-checker-unresolved-lse/expected.txt create mode 100644 test/608-checker-unresolved-lse/info.txt create mode 100644 test/608-checker-unresolved-lse/run create mode 100644 test/608-checker-unresolved-lse/src-dex2oat-unresolved/MissingSuperClass.java create mode 100644 
test/608-checker-unresolved-lse/src/Main.java create mode 100644 test/609-checker-inline-interface/expected.txt create mode 100644 test/609-checker-inline-interface/info.txt create mode 100644 test/609-checker-inline-interface/src/Main.java create mode 100644 test/609-checker-x86-bounds-check/expected.txt create mode 100644 test/609-checker-x86-bounds-check/info.txt create mode 100644 test/609-checker-x86-bounds-check/src/Main.java create mode 100644 test/610-arraycopy/expected.txt create mode 100644 test/610-arraycopy/info.txt create mode 100644 test/610-arraycopy/src/Main.java create mode 100644 test/611-checker-simplify-if/expected.txt create mode 100644 test/611-checker-simplify-if/info.txt create mode 100644 test/611-checker-simplify-if/src/Main.java create mode 100644 test/612-jit-dex-cache/expected.txt create mode 100644 test/612-jit-dex-cache/info.txt create mode 100644 test/612-jit-dex-cache/src-art/A.java create mode 100644 test/612-jit-dex-cache/src-art/B.java create mode 100644 test/612-jit-dex-cache/src-art/Main.java create mode 100644 test/612-jit-dex-cache/src-ex/B.java create mode 100644 test/612-jit-dex-cache/src-ex/LoadedByAppClassLoader.java create mode 100644 test/613-inlining-dex-cache/expected.txt create mode 100644 test/613-inlining-dex-cache/info.txt create mode 100644 test/613-inlining-dex-cache/run create mode 100644 test/613-inlining-dex-cache/src-art/B.java create mode 100644 test/613-inlining-dex-cache/src-art/Main.java create mode 100644 test/613-inlining-dex-cache/src-ex/B.java create mode 100644 test/613-inlining-dex-cache/src-ex/LoadedByAppClassLoader.java create mode 100644 test/614-checker-dump-constant-location/expected.txt create mode 100644 test/614-checker-dump-constant-location/info.txt create mode 100644 test/614-checker-dump-constant-location/src/Main.java create mode 100644 test/615-checker-arm64-store-zero/expected.txt create mode 100644 test/615-checker-arm64-store-zero/info.txt create mode 100644 
test/615-checker-arm64-store-zero/src/Main.java create mode 100644 test/616-cha-abstract/expected.txt create mode 100644 test/616-cha-abstract/info.txt create mode 100644 test/616-cha-abstract/run create mode 100644 test/616-cha-abstract/src/Main.java create mode 100644 test/616-cha-interface-default/build create mode 100644 test/616-cha-interface-default/expected.txt create mode 100644 test/616-cha-interface-default/info.txt create mode 100644 test/616-cha-interface-default/run create mode 100644 test/616-cha-interface-default/src-multidex/Base.java create mode 100644 test/616-cha-interface-default/src/Main.java create mode 100644 test/616-cha-interface/expected.txt create mode 100644 test/616-cha-interface/info.txt create mode 100644 test/616-cha-interface/run create mode 100644 test/616-cha-interface/src/Main.java create mode 100644 test/616-cha-miranda/expected.txt create mode 100644 test/616-cha-miranda/info.txt create mode 100644 test/616-cha-miranda/run create mode 100644 test/616-cha-miranda/src/Main.java create mode 100644 test/616-cha-native/expected.txt create mode 100644 test/616-cha-native/info.txt create mode 100644 test/616-cha-native/src/Main.java create mode 100644 test/616-cha-proxy-method-inline/expected.txt create mode 100644 test/616-cha-proxy-method-inline/info.txt create mode 100644 test/616-cha-proxy-method-inline/run create mode 100644 test/616-cha-proxy-method-inline/src-multidex/Foo.java create mode 100644 test/616-cha-proxy-method-inline/src/Main.java create mode 100644 test/616-cha-regression-proxy-method/expected.txt create mode 100644 test/616-cha-regression-proxy-method/info.txt create mode 100644 test/616-cha-regression-proxy-method/src/Main.java create mode 100644 test/616-cha-unloading/cha_unload.cc create mode 100644 test/616-cha-unloading/expected.txt create mode 100644 test/616-cha-unloading/info.txt create mode 100644 test/616-cha-unloading/run create mode 100644 test/616-cha-unloading/src-art/AbstractCHATester.java create 
mode 100644 test/616-cha-unloading/src-art/Main.java create mode 100644 test/616-cha-unloading/src-ex/AbstractCHATester.java create mode 100644 test/616-cha-unloading/src-ex/ConcreteCHATester.java create mode 100644 test/616-cha/expected.txt create mode 100644 test/616-cha/info.txt create mode 100644 test/616-cha/run create mode 100644 test/616-cha/src/Main.java create mode 100644 test/617-clinit-oome/expected.txt create mode 100644 test/617-clinit-oome/info.txt create mode 100644 test/617-clinit-oome/src/Main.java create mode 100644 test/617-clinit-oome/src/Other.java create mode 100644 test/618-checker-induction/expected.txt create mode 100644 test/618-checker-induction/info.txt create mode 100644 test/618-checker-induction/src/Main.java create mode 100644 test/619-checker-current-method/expected.txt create mode 100644 test/619-checker-current-method/info.txt create mode 100644 test/619-checker-current-method/src/Main.java create mode 100644 test/620-checker-bce-intrinsics/expected.txt create mode 100644 test/620-checker-bce-intrinsics/info.txt create mode 100644 test/620-checker-bce-intrinsics/src/Main.java create mode 100644 test/622-checker-bce-regressions/expected.txt create mode 100644 test/622-checker-bce-regressions/info.txt create mode 100644 test/622-checker-bce-regressions/src/Main.java create mode 100644 test/622-simplifyifs-exception-edges/expected.txt create mode 100644 test/622-simplifyifs-exception-edges/info.txt create mode 100644 test/622-simplifyifs-exception-edges/smali/Test.smali create mode 100644 test/622-simplifyifs-exception-edges/src/Main.java create mode 100644 test/623-checker-loop-regressions/expected.txt create mode 100644 test/623-checker-loop-regressions/info.txt create mode 100644 test/623-checker-loop-regressions/src/Main.java create mode 100644 test/624-checker-stringops/expected.txt create mode 100644 test/624-checker-stringops/info.txt create mode 100644 test/624-checker-stringops/smali/Smali.smali create mode 100644 
test/624-checker-stringops/src/Main.java create mode 100644 test/625-checker-licm-regressions/expected.txt create mode 100644 test/625-checker-licm-regressions/info.txt create mode 100644 test/625-checker-licm-regressions/src/Main.java create mode 100644 test/626-checker-arm64-scratch-register/expected.txt create mode 100644 test/626-checker-arm64-scratch-register/info.txt create mode 100644 test/626-checker-arm64-scratch-register/smali/Main2.smali create mode 100644 test/626-checker-arm64-scratch-register/src-art/Main.java create mode 100644 test/626-checker-arm64-scratch-register/src/Main.java create mode 100644 test/626-const-class-linking/clear_dex_cache_types.cc create mode 100644 test/626-const-class-linking/expected.txt create mode 100644 test/626-const-class-linking/info.txt create mode 100644 test/626-const-class-linking/src-multidex/Helper2.java create mode 100644 test/626-const-class-linking/src-multidex/Helper3.java create mode 100644 test/626-const-class-linking/src-multidex/Test.java create mode 100644 test/626-const-class-linking/src-multidex/Test3.java create mode 100644 test/626-const-class-linking/src/ClassPair.java create mode 100644 test/626-const-class-linking/src/DefiningLoader.java create mode 100644 test/626-const-class-linking/src/DelegatingLoader.java create mode 100644 test/626-const-class-linking/src/Helper1.java create mode 100644 test/626-const-class-linking/src/Main.java create mode 100644 test/626-const-class-linking/src/MisbehavingLoader.java create mode 100644 test/626-const-class-linking/src/RacyLoader.java create mode 100644 test/626-const-class-linking/src/RacyMisbehavingHelper.java create mode 100644 test/626-const-class-linking/src/RacyMisbehavingLoader.java create mode 100644 test/626-set-resolved-string/expected.txt create mode 100644 test/626-set-resolved-string/info.txt create mode 100644 test/626-set-resolved-string/src/Main.java create mode 100644 test/627-checker-unroll/expected.txt create mode 100644 
test/627-checker-unroll/info.txt create mode 100644 test/627-checker-unroll/src/Main.java create mode 100644 test/628-vdex/expected.txt create mode 100644 test/628-vdex/info.txt create mode 100644 test/628-vdex/run create mode 100644 test/628-vdex/src/Main.java create mode 100644 test/629-vdex-speed/expected.txt create mode 100644 test/629-vdex-speed/info.txt create mode 100644 test/629-vdex-speed/run create mode 100644 test/629-vdex-speed/src/Main.java create mode 100644 test/630-safecast-array/expected.txt create mode 100644 test/630-safecast-array/info.txt create mode 100644 test/630-safecast-array/smali/Main.smali create mode 100644 test/631-checker-fp-abs/expected.txt create mode 100644 test/631-checker-fp-abs/info.txt create mode 100644 test/631-checker-fp-abs/src/Main.java create mode 100644 test/631-checker-get-class/expected.txt create mode 100644 test/631-checker-get-class/info.txt create mode 100644 test/631-checker-get-class/src/Main.java create mode 100644 test/632-checker-char-at-bounds/expected.txt create mode 100644 test/632-checker-char-at-bounds/info.txt create mode 100644 test/632-checker-char-at-bounds/src/Main.java create mode 100644 test/633-checker-rtp-getclass/expected.txt create mode 100644 test/633-checker-rtp-getclass/info.txt create mode 100644 test/633-checker-rtp-getclass/smali/SmaliTests.smali create mode 100644 test/633-checker-rtp-getclass/src/Main.java create mode 100644 test/634-vdex-duplicate/expected.txt create mode 100644 test/634-vdex-duplicate/info.txt create mode 100644 test/634-vdex-duplicate/run create mode 100644 test/634-vdex-duplicate/src/Main.java create mode 100644 test/634-vdex-duplicate/src/sun/misc/Unsafe.java create mode 100644 test/635-checker-arm64-volatile-load-cc/expected.txt create mode 100644 test/635-checker-arm64-volatile-load-cc/info.txt create mode 100644 test/635-checker-arm64-volatile-load-cc/src/Main.java create mode 100644 test/636-arm64-veneer-pool/expected.txt create mode 100644 
test/636-arm64-veneer-pool/info.txt create mode 100644 test/636-arm64-veneer-pool/src/Main.java create mode 100644 test/636-wrong-static-access/expected.txt create mode 100644 test/636-wrong-static-access/info.txt create mode 100755 test/636-wrong-static-access/run create mode 100644 test/636-wrong-static-access/src-ex/Foo.java create mode 100644 test/636-wrong-static-access/src/Holder.java create mode 100644 test/636-wrong-static-access/src/Main.java create mode 100644 test/636-wrong-static-access/src2/Holder.java create mode 100644 test/637-checker-throw-inline/expected.txt create mode 100644 test/637-checker-throw-inline/info.txt create mode 100644 test/637-checker-throw-inline/src/Main.java create mode 100644 test/638-checker-inline-cache-intrinsic/expected.txt create mode 100644 test/638-checker-inline-cache-intrinsic/info.txt create mode 100644 test/638-checker-inline-cache-intrinsic/run create mode 100644 test/638-checker-inline-cache-intrinsic/src/Main.java create mode 100644 test/638-checker-inline-caches/expected.txt create mode 100644 test/638-checker-inline-caches/info.txt create mode 100644 test/638-checker-inline-caches/profile create mode 100644 test/638-checker-inline-caches/run create mode 100644 test/638-checker-inline-caches/src-multidex/SubC.java create mode 100644 test/638-checker-inline-caches/src/Main.java create mode 100644 test/638-checker-inline-caches/src/Super.java create mode 100644 test/638-no-line-number/build create mode 100644 test/638-no-line-number/expected.txt create mode 100644 test/638-no-line-number/info.txt create mode 100644 test/638-no-line-number/src/Main.java create mode 100644 test/639-checker-code-sinking/expected.txt create mode 100644 test/639-checker-code-sinking/info.txt create mode 100644 test/639-checker-code-sinking/src/Main.java create mode 100644 test/640-checker-boolean-simd/expected.txt create mode 100644 test/640-checker-boolean-simd/info.txt create mode 100644 test/640-checker-boolean-simd/src/Main.java 
create mode 100644 test/640-checker-integer-valueof/expected.txt create mode 100644 test/640-checker-integer-valueof/info.txt create mode 100644 test/640-checker-integer-valueof/src/Main.java create mode 100644 test/640-checker-simd/expected.txt create mode 100644 test/640-checker-simd/info.txt create mode 100644 test/640-checker-simd/src/Main.java create mode 100644 test/640-checker-simd/src/SimdByte.java create mode 100644 test/640-checker-simd/src/SimdChar.java create mode 100644 test/640-checker-simd/src/SimdDouble.java create mode 100644 test/640-checker-simd/src/SimdFloat.java create mode 100644 test/640-checker-simd/src/SimdInt.java create mode 100644 test/640-checker-simd/src/SimdLong.java create mode 100644 test/640-checker-simd/src/SimdShort.java create mode 100644 test/641-checker-arraycopy/expected.txt create mode 100644 test/641-checker-arraycopy/info.txt create mode 100644 test/641-checker-arraycopy/src/Main.java create mode 100644 test/641-irreducible-inline/expected.txt create mode 100644 test/641-irreducible-inline/info.txt create mode 100644 test/641-irreducible-inline/smali/IrreducibleLoop.smali create mode 100644 test/641-irreducible-inline/src/Main.java create mode 100644 test/641-iterations/expected.txt create mode 100644 test/641-iterations/info.txt create mode 100644 test/641-iterations/src/Main.java create mode 100644 test/642-fp-callees/expected.txt create mode 100644 test/642-fp-callees/fp_callees.cc create mode 100644 test/642-fp-callees/info.txt create mode 100644 test/642-fp-callees/src/Main.java create mode 100644 test/643-checker-bogus-ic/expected.txt create mode 100644 test/643-checker-bogus-ic/info.txt create mode 100644 test/643-checker-bogus-ic/profile create mode 100644 test/643-checker-bogus-ic/run create mode 100644 test/643-checker-bogus-ic/src/Main.java create mode 100644 test/645-checker-abs-simd/expected.txt create mode 100644 test/645-checker-abs-simd/info.txt create mode 100644 test/645-checker-abs-simd/src/Main.java 
create mode 100644 test/646-checker-arraycopy-large-cst-pos/expected.txt create mode 100644 test/646-checker-arraycopy-large-cst-pos/info.txt create mode 100644 test/646-checker-arraycopy-large-cst-pos/src/Main.java create mode 100644 test/646-checker-long-const-to-int/expected.txt create mode 100644 test/646-checker-long-const-to-int/info.txt create mode 100644 test/646-checker-long-const-to-int/src/Main.java create mode 100644 test/646-checker-simd-hadd/expected.txt create mode 100644 test/646-checker-simd-hadd/info.txt create mode 100644 test/646-checker-simd-hadd/src/HaddAltByte.java create mode 100644 test/646-checker-simd-hadd/src/HaddAltChar.java create mode 100644 test/646-checker-simd-hadd/src/HaddAltShort.java create mode 100644 test/646-checker-simd-hadd/src/HaddByte.java create mode 100644 test/646-checker-simd-hadd/src/HaddChar.java create mode 100644 test/646-checker-simd-hadd/src/HaddShort.java create mode 100644 test/646-checker-simd-hadd/src/Main.java create mode 100644 test/647-jni-get-field-id/expected.txt create mode 100644 test/647-jni-get-field-id/get_field_id.cc create mode 100644 test/647-jni-get-field-id/info.txt create mode 100644 test/647-jni-get-field-id/src/DefiningLoader.java create mode 100644 test/647-jni-get-field-id/src/Main.java create mode 100644 test/647-sinking-catch/expected.txt create mode 100644 test/647-sinking-catch/info.txt create mode 100644 test/647-sinking-catch/smali/TestCase.smali create mode 100644 test/647-sinking-catch/src/Main.java create mode 100644 test/648-inline-caches-unresolved/expected.txt create mode 100644 test/648-inline-caches-unresolved/info.txt create mode 100644 test/648-inline-caches-unresolved/profile create mode 100644 test/648-inline-caches-unresolved/run create mode 100644 test/648-inline-caches-unresolved/src-dex2oat-unresolved/UnresolvedSuperClass.java create mode 100644 test/648-inline-caches-unresolved/src/Main.java create mode 100755 test/648-many-direct-methods/build create mode 100644 
test/648-many-direct-methods/expected.txt create mode 100644 test/648-many-direct-methods/info.txt create mode 100755 test/648-many-direct-methods/util-src/generate_java.py create mode 100644 test/649-vdex-duplicate-method/classes.dex create mode 100644 test/649-vdex-duplicate-method/expected.txt create mode 100644 test/649-vdex-duplicate-method/info.txt create mode 100644 test/650-checker-inline-access-thunks/expected.txt create mode 100644 test/650-checker-inline-access-thunks/info.txt create mode 100644 test/650-checker-inline-access-thunks/src/Main.java create mode 100644 test/652-deopt-intrinsic/expected.txt create mode 100644 test/652-deopt-intrinsic/info.txt create mode 100755 test/652-deopt-intrinsic/run create mode 100644 test/652-deopt-intrinsic/src/Main.java create mode 100644 test/654-checker-periodic/expected.txt create mode 100644 test/654-checker-periodic/info.txt create mode 100644 test/654-checker-periodic/src/Main.java create mode 100644 test/655-checker-simd-arm-opt/expected.txt create mode 100644 test/655-checker-simd-arm-opt/info.txt create mode 100644 test/655-checker-simd-arm-opt/src/Main.java create mode 100644 test/655-jit-clinit/expected.txt create mode 100644 test/655-jit-clinit/info.txt create mode 100644 test/655-jit-clinit/src/Main.java create mode 100755 test/656-annotation-lookup-generic-jni/check create mode 100644 test/656-annotation-lookup-generic-jni/expected.txt create mode 100644 test/656-annotation-lookup-generic-jni/info.txt create mode 100644 test/656-annotation-lookup-generic-jni/src-art/Main.java create mode 100644 test/656-annotation-lookup-generic-jni/src-ex/DummyAnnotation.java create mode 100644 test/656-annotation-lookup-generic-jni/src-ex/Test.java create mode 100644 test/656-annotation-lookup-generic-jni/test.cc create mode 100644 test/656-checker-simd-opt/expected.txt create mode 100644 test/656-checker-simd-opt/info.txt create mode 100644 test/656-checker-simd-opt/smali/Smali.smali create mode 100644 
test/656-checker-simd-opt/src/Main.java create mode 100644 test/656-loop-deopt/expected.txt create mode 100644 test/656-loop-deopt/info.txt create mode 100644 test/656-loop-deopt/src/Main.java create mode 100644 test/657-branches/expected.txt create mode 100644 test/657-branches/info.txt create mode 100644 test/657-branches/src/Main.java create mode 100644 test/658-fp-read-barrier/expected.txt create mode 100644 test/658-fp-read-barrier/info.txt create mode 100644 test/658-fp-read-barrier/src/Main.java create mode 100644 test/659-unpadded-array/expected.txt create mode 100644 test/659-unpadded-array/info.txt create mode 100644 test/659-unpadded-array/src-art/Main.java create mode 100644 test/660-checker-sad/expected.txt create mode 100644 test/660-checker-sad/info.txt create mode 100644 test/660-checker-sad/src/Main.java create mode 100644 test/660-checker-sad/src/SadByte.java create mode 100644 test/660-checker-sad/src/SadChar.java create mode 100644 test/660-checker-sad/src/SadInt.java create mode 100644 test/660-checker-sad/src/SadLong.java create mode 100644 test/660-checker-sad/src/SadShort.java create mode 100644 test/660-checker-simd-sad/expected.txt create mode 100644 test/660-checker-simd-sad/info.txt create mode 100644 test/660-checker-simd-sad/src/Main.java create mode 100644 test/660-checker-simd-sad/src/SimdSadByte.java create mode 100644 test/660-checker-simd-sad/src/SimdSadChar.java create mode 100644 test/660-checker-simd-sad/src/SimdSadInt.java create mode 100644 test/660-checker-simd-sad/src/SimdSadLong.java create mode 100644 test/660-checker-simd-sad/src/SimdSadShort.java create mode 100644 test/660-checker-simd-sad/src/SimdSadShort2.java create mode 100644 test/660-checker-simd-sad/src/SimdSadShort3.java create mode 100644 test/660-clinit/expected.txt create mode 100644 test/660-clinit/info.txt create mode 100644 test/660-clinit/profile create mode 100644 test/660-clinit/run create mode 100644 test/660-clinit/src/Main.java create mode 100644 
test/660-store-8-16/expected.txt create mode 100644 test/660-store-8-16/info.txt create mode 100644 test/660-store-8-16/smali/TestCase.smali create mode 100644 test/660-store-8-16/src/Main.java create mode 100644 test/661-checker-simd-reduc/expected.txt create mode 100644 test/661-checker-simd-reduc/info.txt create mode 100644 test/661-checker-simd-reduc/src/Main.java create mode 100644 test/661-classloader-allocator/expected.txt create mode 100644 test/661-classloader-allocator/info.txt create mode 100644 test/661-classloader-allocator/src-ex/OtherClass.java create mode 100644 test/661-classloader-allocator/src/Main.java create mode 100644 test/661-oat-writer-layout/expected.no-compiled-code.txt create mode 100644 test/661-oat-writer-layout/expected.txt create mode 100644 test/661-oat-writer-layout/info.txt create mode 100644 test/661-oat-writer-layout/oat_writer_layout.cc create mode 100755 test/661-oat-writer-layout/parse_oatdump_offsets.sh create mode 100644 test/661-oat-writer-layout/profile create mode 100644 test/661-oat-writer-layout/run create mode 100644 test/661-oat-writer-layout/src/Generated.java create mode 100644 test/661-oat-writer-layout/src/Main.java create mode 100644 test/661-oat-writer-layout/src/Test.java create mode 100644 test/662-regression-alias/expected.txt create mode 100644 test/662-regression-alias/info.txt create mode 100644 test/662-regression-alias/src/Main.java create mode 100644 test/663-checker-select-generator/expected.txt create mode 100644 test/663-checker-select-generator/info.txt create mode 100644 test/663-checker-select-generator/smali/TestCase.smali create mode 100644 test/663-checker-select-generator/src/Main.java create mode 100644 test/663-odd-dex-size/classes.dex create mode 100644 test/663-odd-dex-size/expected.txt create mode 100644 test/663-odd-dex-size/info.txt create mode 100644 test/663-odd-dex-size2/663-odd-dex-size2.jar create mode 100644 test/663-odd-dex-size2/build create mode 100644 
test/663-odd-dex-size2/expected.txt create mode 100644 test/663-odd-dex-size2/info.txt create mode 100644 test/663-odd-dex-size3/663-odd-dex-size3.jar create mode 100644 test/663-odd-dex-size3/build create mode 100644 test/663-odd-dex-size3/expected.txt create mode 100644 test/663-odd-dex-size3/info.txt create mode 100644 test/663-odd-dex-size4/663-odd-dex-size4.jar create mode 100644 test/663-odd-dex-size4/build create mode 100644 test/663-odd-dex-size4/expected.txt create mode 100644 test/663-odd-dex-size4/info.txt create mode 100644 test/664-aget-verifier/aget-verifier.cc create mode 100644 test/664-aget-verifier/expected.txt create mode 100644 test/664-aget-verifier/info.txt create mode 100644 test/664-aget-verifier/src/Main.java create mode 100644 test/665-checker-simd-zero/expected.txt create mode 100644 test/665-checker-simd-zero/info.txt create mode 100644 test/665-checker-simd-zero/src/Main.java create mode 100644 test/666-dex-cache-itf/expected.txt create mode 100644 test/666-dex-cache-itf/info.txt create mode 100644 test/666-dex-cache-itf/src/Main.java create mode 100644 test/667-checker-simd-alignment/expected.txt create mode 100644 test/667-checker-simd-alignment/info.txt create mode 100644 test/667-checker-simd-alignment/src/Main.java create mode 100644 test/667-jit-jni-stub/expected.txt create mode 100644 test/667-jit-jni-stub/info.txt create mode 100644 test/667-jit-jni-stub/jit_jni_stub_test.cc create mode 100755 test/667-jit-jni-stub/run create mode 100644 test/667-jit-jni-stub/src/Main.java create mode 100644 test/667-out-of-bounds/expected.txt create mode 100644 test/667-out-of-bounds/info.txt create mode 100644 test/667-out-of-bounds/src/Main.java create mode 100644 test/668-aiobe/expected.txt create mode 100644 test/668-aiobe/info.txt create mode 100644 test/668-aiobe/smali/TestCase.smali create mode 100644 test/668-aiobe/src/Main.java create mode 100644 test/669-checker-break/expected.txt create mode 100644 test/669-checker-break/info.txt 
create mode 100644 test/669-checker-break/src/Main.java create mode 100644 test/670-bitstring-type-check/build create mode 100644 test/670-bitstring-type-check/expected.txt create mode 100644 test/670-bitstring-type-check/info.txt create mode 100644 test/670-bitstring-type-check/run create mode 100644 test/671-npe-field-opts/expected.txt create mode 100644 test/671-npe-field-opts/info.txt create mode 100644 test/671-npe-field-opts/src/Main.java create mode 100644 test/672-checker-throw-method/expected.txt create mode 100644 test/672-checker-throw-method/info.txt create mode 100644 test/672-checker-throw-method/src/Main.java create mode 100644 test/673-checker-throw-vmethod/expected.txt create mode 100644 test/673-checker-throw-vmethod/info.txt create mode 100644 test/673-checker-throw-vmethod/src/Main.java create mode 100644 test/674-HelloWorld-Dm/expected.txt create mode 100644 test/674-HelloWorld-Dm/info.txt create mode 100644 test/674-HelloWorld-Dm/run create mode 100644 test/674-HelloWorld-Dm/src/Main.java create mode 100644 test/674-hiddenapi/build create mode 100644 test/674-hiddenapi/check create mode 100644 test/674-hiddenapi/expected.txt create mode 100644 test/674-hiddenapi/hiddenapi-flags.csv create mode 100644 test/674-hiddenapi/hiddenapi.cc create mode 100644 test/674-hiddenapi/info.txt create mode 100755 test/674-hiddenapi/run create mode 100644 test/674-hiddenapi/src-art/Main.java create mode 100644 test/674-hiddenapi/src-ex/ChildClass.java create mode 100644 test/674-hiddenapi/src-ex/JLI.java create mode 100644 test/674-hiddenapi/src-ex/JNI.java create mode 100644 test/674-hiddenapi/src-ex/Linking.java create mode 100644 test/674-hiddenapi/src-ex/Reflection.java create mode 100644 test/674-hiddenapi/src/DummyClass.java create mode 100644 test/674-hiddenapi/src/NullaryConstructorBlacklist.java create mode 100644 test/674-hiddenapi/src/NullaryConstructorBlacklistAndCorePlatformApi.java create mode 100644 
test/674-hiddenapi/src/NullaryConstructorDarkGreylist.java create mode 100644 test/674-hiddenapi/src/NullaryConstructorLightGreylist.java create mode 100644 test/674-hiddenapi/src/NullaryConstructorWhitelist.java create mode 100644 test/674-hiddenapi/src/ParentClass.java create mode 100644 test/674-hiddenapi/src/ParentInterface.java create mode 100644 test/674-hotness-compiled/expected.txt create mode 100644 test/674-hotness-compiled/info.txt create mode 100755 test/674-hotness-compiled/run create mode 100644 test/674-hotness-compiled/src/Main.java create mode 100755 test/674-vdex-uncompress/build create mode 100644 test/674-vdex-uncompress/expected.txt create mode 100644 test/674-vdex-uncompress/info.txt create mode 100644 test/674-vdex-uncompress/run create mode 100644 test/674-vdex-uncompress/src/Main.java create mode 100644 test/675-checker-unverified-method/expected.txt create mode 100644 test/675-checker-unverified-method/info.txt create mode 100644 test/675-checker-unverified-method/smali/TestCase.smali create mode 100644 test/675-checker-unverified-method/src/Main.java create mode 100644 test/676-proxy-jit-at-first-use/expected.txt create mode 100644 test/676-proxy-jit-at-first-use/info.txt create mode 100644 test/676-proxy-jit-at-first-use/run create mode 100644 test/676-proxy-jit-at-first-use/src/Main.java create mode 100644 test/676-resolve-field-type/expected.txt create mode 100644 test/676-resolve-field-type/info.txt create mode 100644 test/676-resolve-field-type/src-art/Foo.java create mode 100644 test/676-resolve-field-type/src-art/Main.java create mode 100644 test/676-resolve-field-type/src-ex/ChildClass.java create mode 100755 test/677-fsi/build create mode 100644 test/677-fsi/check create mode 100644 test/677-fsi/expected.txt create mode 100644 test/677-fsi/info.txt create mode 100644 test/677-fsi/run create mode 100644 test/677-fsi/src/Main.java create mode 100644 test/677-fsi2/expected.txt create mode 100644 test/677-fsi2/info.txt create mode 
100644 test/677-fsi2/run create mode 100644 test/677-fsi2/src/Main.java create mode 100644 test/678-quickening/expected.txt create mode 100644 test/678-quickening/info.txt create mode 100644 test/678-quickening/run create mode 100644 test/678-quickening/src-art/Main.java create mode 100644 test/679-checker-minmax/expected.txt create mode 100644 test/679-checker-minmax/info.txt create mode 100644 test/679-checker-minmax/src/Main.java create mode 100644 test/679-locks/expected.txt create mode 100644 test/679-locks/info.txt create mode 100644 test/679-locks/run create mode 100644 test/679-locks/src/Main.java create mode 100644 test/680-checker-deopt-dex-pc-0/expected.txt create mode 100644 test/680-checker-deopt-dex-pc-0/info.txt create mode 100644 test/680-checker-deopt-dex-pc-0/src/Main.java create mode 100644 test/680-sink-regression/expected.txt create mode 100644 test/680-sink-regression/info.txt create mode 100644 test/680-sink-regression/src/Main.java create mode 100644 test/681-checker-abs/expected.txt create mode 100644 test/681-checker-abs/info.txt create mode 100644 test/681-checker-abs/src/Main.java create mode 100644 test/682-double-catch-phi/expected.txt create mode 100644 test/682-double-catch-phi/info.txt create mode 100644 test/682-double-catch-phi/smali/DoubleCatchPhi.smali create mode 100644 test/682-double-catch-phi/src/Main.java create mode 100644 test/683-clinit-inline-static-invoke/expected.txt create mode 100644 test/683-clinit-inline-static-invoke/info.txt create mode 100644 test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java create mode 100644 test/683-clinit-inline-static-invoke/src/Main.java create mode 100644 test/684-checker-simd-dotprod/expected.txt create mode 100644 test/684-checker-simd-dotprod/info.txt create mode 100644 test/684-checker-simd-dotprod/src/Main.java create mode 100644 test/684-checker-simd-dotprod/src/other/TestByte.java create mode 100644 test/684-checker-simd-dotprod/src/other/TestCharShort.java create 
mode 100644 test/684-checker-simd-dotprod/src/other/TestFloatDouble.java create mode 100644 test/684-checker-simd-dotprod/src/other/TestVarious.java create mode 100644 test/684-select-condition/expected.txt create mode 100644 test/684-select-condition/info.txt create mode 100644 test/684-select-condition/src/Main.java create mode 100644 test/685-deoptimizeable/expected.txt create mode 100644 test/685-deoptimizeable/info.txt create mode 100644 test/685-deoptimizeable/src/Main.java create mode 100644 test/685-shifts/expected.txt create mode 100644 test/685-shifts/info.txt create mode 100644 test/685-shifts/smali/Test.smali create mode 100644 test/685-shifts/src/Main.java create mode 100644 test/686-get-this/expected.txt create mode 100644 test/686-get-this/info.txt create mode 100644 test/686-get-this/smali/Test.smali create mode 100644 test/686-get-this/src/Main.java create mode 100644 test/687-deopt/expected.txt create mode 100644 test/687-deopt/info.txt create mode 100644 test/687-deopt/src/Main.java create mode 100644 test/688-shared-library/check create mode 100644 test/688-shared-library/expected.txt create mode 100644 test/688-shared-library/info.txt create mode 100644 test/688-shared-library/run create mode 100644 test/688-shared-library/src-art/Main.java create mode 100644 test/688-shared-library/src-ex/Main.java create mode 100644 test/688-shared-library/src-ex/SharedLibraryOne.java create mode 100644 test/689-multi-catch/expected.txt create mode 100644 test/689-multi-catch/info.txt create mode 100644 test/689-multi-catch/src/Main.java create mode 100644 test/689-zygote-jit-deopt/expected.txt create mode 100644 test/689-zygote-jit-deopt/info.txt create mode 100644 test/689-zygote-jit-deopt/run create mode 100644 test/689-zygote-jit-deopt/src/Main.java create mode 100644 test/690-hiddenapi-same-name-methods/build create mode 100644 test/690-hiddenapi-same-name-methods/expected.txt create mode 100644 test/690-hiddenapi-same-name-methods/hiddenapi-flags.csv 
create mode 100644 test/690-hiddenapi-same-name-methods/info.txt create mode 100644 test/690-hiddenapi-same-name-methods/smali-ex/DirectMethods.smali create mode 100644 test/690-hiddenapi-same-name-methods/smali-ex/NonSyntheticMethods.smali create mode 100644 test/690-hiddenapi-same-name-methods/smali-ex/SyntheticMethods.smali create mode 100644 test/690-hiddenapi-same-name-methods/smali-ex/VirtualMethods.smali create mode 100644 test/690-hiddenapi-same-name-methods/src-ex/GenericInterface.java create mode 100644 test/690-hiddenapi-same-name-methods/src-ex/SpecificClass.java create mode 100644 test/690-hiddenapi-same-name-methods/src/Main.java create mode 100644 test/691-hiddenapi-proxy/build create mode 100644 test/691-hiddenapi-proxy/expected.txt create mode 100644 test/691-hiddenapi-proxy/hiddenapi-flags.csv create mode 100644 test/691-hiddenapi-proxy/info.txt create mode 100644 test/691-hiddenapi-proxy/src-ex/MyInterface.java create mode 100644 test/691-hiddenapi-proxy/src/Main.java create mode 100644 test/692-vdex-inmem-loader/expected.txt create mode 100644 test/692-vdex-inmem-loader/info.txt create mode 100644 test/692-vdex-inmem-loader/src-ex/DummyClass.java create mode 100644 test/692-vdex-inmem-loader/src-secondary/art/ClassA.java create mode 100644 test/692-vdex-inmem-loader/src-secondary/art/ClassB.java create mode 100755 test/692-vdex-inmem-loader/src-secondary/gen.sh create mode 100644 test/692-vdex-inmem-loader/src/Main.java create mode 100644 test/692-vdex-inmem-loader/vdex_inmem_loader.cc create mode 100644 test/693-vdex-inmem-loader-evict/expected.txt create mode 100644 test/693-vdex-inmem-loader-evict/info.txt create mode 100755 test/693-vdex-inmem-loader-evict/src-secondary/gen.sh create mode 100644 test/693-vdex-inmem-loader-evict/src/Main.java create mode 100644 test/694-clinit-jit/expected.txt create mode 100644 test/694-clinit-jit/info.txt create mode 100644 test/694-clinit-jit/src/Main.java create mode 100644 
test/695-simplify-throws/expected.txt create mode 100644 test/695-simplify-throws/info.txt create mode 100644 test/695-simplify-throws/src/Main.java create mode 100644 test/696-loop/expected.txt create mode 100644 test/696-loop/info.txt create mode 100644 test/696-loop/src/Main.java create mode 100644 test/697-checker-string-append/expected.txt create mode 100644 test/697-checker-string-append/info.txt create mode 100644 test/697-checker-string-append/src/Main.java create mode 100644 test/698-selects/expected.txt create mode 100644 test/698-selects/info.txt create mode 100644 test/698-selects/src/Main.java create mode 100644 test/699-checker-string-append2/expected.txt create mode 100644 test/699-checker-string-append2/info.txt create mode 100644 test/699-checker-string-append2/smali/B146014745.smali create mode 100644 test/699-checker-string-append2/src/Main.java create mode 100644 test/700-LoadArgRegs/expected.txt create mode 100644 test/700-LoadArgRegs/info.txt create mode 100644 test/700-LoadArgRegs/src/Main.java create mode 100644 test/701-easy-div-rem/build create mode 100644 test/701-easy-div-rem/expected.txt create mode 100644 test/701-easy-div-rem/genMain.py create mode 100644 test/701-easy-div-rem/info.txt create mode 100644 test/702-LargeBranchOffset/build create mode 100644 test/702-LargeBranchOffset/expected.txt create mode 100644 test/702-LargeBranchOffset/info.txt create mode 100644 test/702-LargeBranchOffset/src/Main.java.in create mode 100644 test/703-floating-point-div/expected.txt create mode 100644 test/703-floating-point-div/info.txt create mode 100644 test/703-floating-point-div/src/Main.java create mode 100644 test/704-multiply-accumulate/expected.txt create mode 100644 test/704-multiply-accumulate/info.txt create mode 100644 test/704-multiply-accumulate/src/Main.java create mode 100644 test/705-register-conflict/expected.txt create mode 100644 test/705-register-conflict/info.txt create mode 100644 test/705-register-conflict/src/Main.java 
create mode 100644 test/706-checker-scheduler/expected.txt create mode 100644 test/706-checker-scheduler/info.txt create mode 100644 test/706-checker-scheduler/run create mode 100644 test/706-checker-scheduler/src-dex2oat-unresolved/UnresolvedClass.java create mode 100644 test/706-checker-scheduler/src/Main.java create mode 100755 test/707-checker-invalid-profile/check create mode 100644 test/707-checker-invalid-profile/expected.txt create mode 100644 test/707-checker-invalid-profile/info.txt create mode 100644 test/707-checker-invalid-profile/profile create mode 100644 test/707-checker-invalid-profile/run create mode 100644 test/707-checker-invalid-profile/src/Main.java create mode 100644 test/708-jit-cache-churn/expected.txt create mode 100644 test/708-jit-cache-churn/info.txt create mode 100644 test/708-jit-cache-churn/jit.cc create mode 100644 test/708-jit-cache-churn/src/JitCacheChurnTest.java create mode 100644 test/708-jit-cache-churn/src/Main.java create mode 100755 test/709-checker-varhandles/build create mode 100644 test/709-checker-varhandles/expected.txt create mode 100644 test/709-checker-varhandles/info.txt create mode 100644 test/709-checker-varhandles/src/Main.java create mode 100644 test/710-varhandle-creation/build create mode 100644 test/710-varhandle-creation/expected.txt create mode 100644 test/710-varhandle-creation/info.txt create mode 100644 test/710-varhandle-creation/src/Main.java create mode 100644 test/711-checker-type-conversion/expected.txt create mode 100644 test/711-checker-type-conversion/info.txt create mode 100644 test/711-checker-type-conversion/src/Main.java create mode 100755 test/712-varhandle-invocations/build create mode 100644 test/712-varhandle-invocations/expected.txt create mode 100644 test/712-varhandle-invocations/info.txt create mode 100644 test/712-varhandle-invocations/src/SampleValues.java create mode 100644 test/712-varhandle-invocations/src/SimpleTests.java create mode 100644 
test/712-varhandle-invocations/src/VarHandleAccessorExceptionTests.java create mode 100644 test/712-varhandle-invocations/src/VarHandleBadCoordinateTests.java create mode 100644 test/712-varhandle-invocations/src/VarHandleReflectiveTest.java create mode 100644 test/712-varhandle-invocations/src/VarHandleTypeConversionTests.java create mode 100644 test/712-varhandle-invocations/src/VarHandleUnitTest.java create mode 100644 test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java create mode 100644 test/712-varhandle-invocations/src/VarHandleUnitTestHelpers.java create mode 100644 test/712-varhandle-invocations/src/Widget.java create mode 100644 test/712-varhandle-invocations/util-src/generate_java.py create mode 100755 test/713-varhandle-invokers/build create mode 100644 test/713-varhandle-invokers/expected.txt create mode 100644 test/713-varhandle-invokers/info.txt create mode 100644 test/713-varhandle-invokers/src/Main.java create mode 100644 test/714-invoke-custom-lambda-metafactory/build create mode 100644 test/714-invoke-custom-lambda-metafactory/expected.txt create mode 100644 test/714-invoke-custom-lambda-metafactory/info.txt create mode 100755 test/714-invoke-custom-lambda-metafactory/run create mode 100644 test/714-invoke-custom-lambda-metafactory/src/Main.java create mode 100644 test/715-clinit-implicit-parameter-annotations/build create mode 100644 test/715-clinit-implicit-parameter-annotations/expected.txt create mode 100644 test/715-clinit-implicit-parameter-annotations/info.txt create mode 100644 test/715-clinit-implicit-parameter-annotations/src/Main.java create mode 100755 test/716-jli-jit-samples/build create mode 100644 test/716-jli-jit-samples/expected.txt create mode 100644 test/716-jli-jit-samples/info.txt create mode 100644 test/716-jli-jit-samples/src-art/Main.java create mode 100644 test/717-integer-value-of/expected.txt create mode 100644 test/717-integer-value-of/info.txt create mode 100644 test/717-integer-value-of/src/Main.java 
create mode 100644 test/718-zipfile-finalizer/expected.txt create mode 100644 test/718-zipfile-finalizer/info.txt create mode 100644 test/718-zipfile-finalizer/src/Main.java create mode 100644 test/719-dm-verify-redefinition/check create mode 100644 test/719-dm-verify-redefinition/expected.txt create mode 100644 test/719-dm-verify-redefinition/info.txt create mode 100644 test/719-dm-verify-redefinition/run create mode 100644 test/719-dm-verify-redefinition/src-ex/Redefined.java create mode 100644 test/719-dm-verify-redefinition/src/Main.java create mode 100644 test/719-dm-verify-redefinition/src/Redefined.java create mode 100644 test/720-thread-priority/expected.txt create mode 100644 test/720-thread-priority/info.txt create mode 100644 test/720-thread-priority/src/Main.java create mode 100644 test/720-thread-priority/thread_priority.cc create mode 100644 test/721-osr/expected.txt create mode 100644 test/721-osr/info.txt create mode 100644 test/721-osr/src/Main.java create mode 100644 test/723-string-init-range/expected.txt create mode 100644 test/723-string-init-range/info.txt create mode 100644 test/723-string-init-range/smali/new-instance.smali create mode 100644 test/723-string-init-range/src/Main.java create mode 100644 test/724-invoke-super-npe/expected.txt create mode 100644 test/724-invoke-super-npe/info.txt create mode 100644 test/724-invoke-super-npe/smali/TestCase.smali create mode 100644 test/724-invoke-super-npe/src/Main.java create mode 100644 test/725-imt-conflict-object/expected.txt create mode 100644 test/725-imt-conflict-object/info.txt create mode 100644 test/725-imt-conflict-object/smali/TestCase.smali create mode 100644 test/725-imt-conflict-object/src/Main.java create mode 100644 test/800-smali/expected.txt create mode 100644 test/800-smali/info.txt create mode 100644 test/800-smali/jni.cc create mode 100644 test/800-smali/smali/B30458218.smali create mode 100644 test/800-smali/smali/BadCaseInOpRegRegReg.smali create mode 100644 
test/800-smali/smali/CmpLong.smali create mode 100644 test/800-smali/smali/ConstClassAliasing.smali create mode 100644 test/800-smali/smali/EmptySparseSwitch.smali create mode 100644 test/800-smali/smali/FloatBadArgReg.smali create mode 100644 test/800-smali/smali/FloatIntConstPassing.smali create mode 100644 test/800-smali/smali/PackedSwitch.smali create mode 100644 test/800-smali/smali/b_121191566.smali create mode 100644 test/800-smali/smali/b_121245951.smali create mode 100644 test/800-smali/smali/b_121245951_2.smali create mode 100644 test/800-smali/smali/b_121245951_3.smali create mode 100644 test/800-smali/smali/b_122501785.smali create mode 100644 test/800-smali/smali/b_134061982.smali create mode 100644 test/800-smali/smali/b_134061983_2.smali create mode 100644 test/800-smali/smali/b_17410612.smali create mode 100644 test/800-smali/smali/b_17790197.smali create mode 100644 test/800-smali/smali/b_18380491AbstractBase.smali create mode 100644 test/800-smali/smali/b_18380491ConcreteClass.smali create mode 100644 test/800-smali/smali/b_18718277.smali create mode 100644 test/800-smali/smali/b_18800943_1.smali create mode 100644 test/800-smali/smali/b_18800943_2.smali create mode 100644 test/800-smali/smali/b_20224106.smali create mode 100644 test/800-smali/smali/b_20843113.smali create mode 100644 test/800-smali/smali/b_21614284.smali create mode 100644 test/800-smali/smali/b_21645819.smali create mode 100644 test/800-smali/smali/b_21863767.smali create mode 100644 test/800-smali/smali/b_21869691A.smali create mode 100644 test/800-smali/smali/b_21869691B.smali create mode 100644 test/800-smali/smali/b_21869691C.smali create mode 100644 test/800-smali/smali/b_21869691I.smali create mode 100644 test/800-smali/smali/b_21873167.smali create mode 100644 test/800-smali/smali/b_21886894.smali create mode 100644 test/800-smali/smali/b_21902684.smali create mode 100644 test/800-smali/smali/b_22045582.smali create mode 100644 test/800-smali/smali/b_22045582_int.smali 
create mode 100644 test/800-smali/smali/b_22045582_wide.smali create mode 100644 test/800-smali/smali/b_22080519.smali create mode 100644 test/800-smali/smali/b_22244733.smali create mode 100644 test/800-smali/smali/b_22331663.smali create mode 100644 test/800-smali/smali/b_22331663_fail.smali create mode 100644 test/800-smali/smali/b_22331663_pass.smali create mode 100644 test/800-smali/smali/b_22411633_1.smali create mode 100644 test/800-smali/smali/b_22411633_2.smali create mode 100644 test/800-smali/smali/b_22411633_3.smali create mode 100644 test/800-smali/smali/b_22411633_4.smali create mode 100644 test/800-smali/smali/b_22411633_5.smali create mode 100644 test/800-smali/smali/b_22777307.smali create mode 100644 test/800-smali/smali/b_22881413.smali create mode 100644 test/800-smali/smali/b_23201502.smali create mode 100644 test/800-smali/smali/b_23300986.smali create mode 100644 test/800-smali/smali/b_23502994.smali create mode 100644 test/800-smali/smali/b_24399945.smali create mode 100644 test/800-smali/smali/b_25494456.smali create mode 100644 test/800-smali/smali/b_26143249.smali create mode 100644 test/800-smali/smali/b_26579108.smali create mode 100644 test/800-smali/smali/b_26594149_1.smali create mode 100644 test/800-smali/smali/b_26594149_2.smali create mode 100644 test/800-smali/smali/b_26594149_3.smali create mode 100644 test/800-smali/smali/b_26594149_4.smali create mode 100644 test/800-smali/smali/b_26594149_5.smali create mode 100644 test/800-smali/smali/b_26594149_6.smali create mode 100644 test/800-smali/smali/b_26594149_7.smali create mode 100644 test/800-smali/smali/b_26594149_8.smali create mode 100644 test/800-smali/smali/b_26965384.smali create mode 100644 test/800-smali/smali/b_26965384Super.smali create mode 100644 test/800-smali/smali/b_27148248.smali create mode 100644 test/800-smali/smali/b_27799205_1.smali create mode 100644 test/800-smali/smali/b_27799205_2.smali create mode 100644 test/800-smali/smali/b_27799205_3.smali create 
mode 100644 test/800-smali/smali/b_27799205_4.smali create mode 100644 test/800-smali/smali/b_27799205_5.smali create mode 100644 test/800-smali/smali/b_27799205_6.smali create mode 100644 test/800-smali/smali/b_27799205_helper.smali create mode 100644 test/800-smali/smali/b_28187158.smali create mode 100644 test/800-smali/smali/b_29778499_1.smali create mode 100644 test/800-smali/smali/b_29778499_2.smali create mode 100644 test/800-smali/smali/b_31313170.smali create mode 100644 test/800-smali/smali/move_exc.smali create mode 100644 test/800-smali/smali/move_exception_on_entry.smali create mode 100755 test/800-smali/smali/negLong.smali create mode 100644 test/800-smali/smali/sameFieldNames.smali create mode 100644 test/800-smali/src/Main.java create mode 100644 test/800-smali/src/pkg/ProtectedClass.java create mode 100644 test/801-VoidCheckCast/classes.dex create mode 100644 test/801-VoidCheckCast/expected.txt create mode 100644 test/801-VoidCheckCast/info.txt create mode 100644 test/802-deoptimization/expected.txt create mode 100644 test/802-deoptimization/info.txt create mode 100644 test/802-deoptimization/smali/catch_handler_on_entry.smali create mode 100644 test/802-deoptimization/src/CatchHandlerOnEntryHelper.java create mode 100644 test/802-deoptimization/src/DeoptimizationController.java create mode 100644 test/802-deoptimization/src/Main.java create mode 100644 test/803-no-super/expected.txt create mode 100644 test/803-no-super/info.txt create mode 100644 test/803-no-super/smali/nosuper1.smali create mode 100644 test/803-no-super/src/Main.java create mode 100644 test/804-class-extends-itself/build create mode 100644 test/804-class-extends-itself/expected.txt create mode 100644 test/804-class-extends-itself/info.txt create mode 100644 test/804-class-extends-itself/smali/Main.smali create mode 100644 test/804-class-extends-itself/smali/b_28685551.smali create mode 100644 test/805-TooDeepClassInstanceOf/expected.txt create mode 100644 
test/805-TooDeepClassInstanceOf/info.txt create mode 100644 test/805-TooDeepClassInstanceOf/src/Main.java create mode 100644 test/806-TooWideClassInstanceOf/expected.txt create mode 100644 test/806-TooWideClassInstanceOf/info.txt create mode 100644 test/806-TooWideClassInstanceOf/src/Main.java create mode 100755 test/807-method-handle-and-mr/build create mode 100644 test/807-method-handle-and-mr/expected.txt create mode 100644 test/807-method-handle-and-mr/info.txt create mode 100644 test/807-method-handle-and-mr/src/Main.java create mode 100644 test/900-hello-plugin/expected.txt create mode 100644 test/900-hello-plugin/info.txt create mode 100644 test/900-hello-plugin/load_unload.cc create mode 100755 test/900-hello-plugin/run create mode 100644 test/900-hello-plugin/src/Main.java create mode 100644 test/901-hello-ti-agent/basics.cc create mode 100644 test/901-hello-ti-agent/basics.h create mode 100644 test/901-hello-ti-agent/expected.txt create mode 100644 test/901-hello-ti-agent/info.txt create mode 100755 test/901-hello-ti-agent/run create mode 100644 test/901-hello-ti-agent/src/Main.java create mode 120000 test/901-hello-ti-agent/src/art/Main.java create mode 100644 test/901-hello-ti-agent/src/art/Test901.java create mode 100644 test/902-hello-transformation/expected.txt create mode 100644 test/902-hello-transformation/info.txt create mode 100755 test/902-hello-transformation/run create mode 100644 test/902-hello-transformation/src/Main.java create mode 120000 test/902-hello-transformation/src/art/Redefinition.java create mode 100644 test/902-hello-transformation/src/art/Test902.java create mode 100644 test/903-hello-tagging/expected.txt create mode 100644 test/903-hello-tagging/info.txt create mode 100755 test/903-hello-tagging/run create mode 100644 test/903-hello-tagging/src/Main.java create mode 120000 test/903-hello-tagging/src/art/Main.java create mode 100644 test/903-hello-tagging/src/art/Test903.java create mode 100644 test/903-hello-tagging/tagging.cc 
create mode 100644 test/904-object-allocation/expected.txt create mode 100644 test/904-object-allocation/info.txt create mode 100755 test/904-object-allocation/run create mode 100644 test/904-object-allocation/src/Main.java create mode 100644 test/904-object-allocation/src/art/Test904.java create mode 100644 test/904-object-allocation/tracking.cc create mode 100644 test/905-object-free/expected.txt create mode 100644 test/905-object-free/info.txt create mode 100755 test/905-object-free/run create mode 100644 test/905-object-free/src/Main.java create mode 120000 test/905-object-free/src/art/Main.java create mode 100644 test/905-object-free/src/art/Test905.java create mode 100644 test/905-object-free/tracking_free.cc create mode 100644 test/906-iterate-heap/expected.txt create mode 100644 test/906-iterate-heap/info.txt create mode 100644 test/906-iterate-heap/iterate_heap.cc create mode 100755 test/906-iterate-heap/run create mode 100644 test/906-iterate-heap/src/Main.java create mode 120000 test/906-iterate-heap/src/art/Main.java create mode 100644 test/906-iterate-heap/src/art/Test906.java create mode 100644 test/907-get-loaded-classes/expected.txt create mode 100644 test/907-get-loaded-classes/get_loaded_classes.cc create mode 100644 test/907-get-loaded-classes/info.txt create mode 100755 test/907-get-loaded-classes/run create mode 100644 test/907-get-loaded-classes/src/Main.java create mode 100644 test/907-get-loaded-classes/src/art/Cerr.java create mode 100644 test/907-get-loaded-classes/src/art/Test907.java create mode 100644 test/908-gc-start-finish/expected.txt create mode 100644 test/908-gc-start-finish/gc_callbacks.cc create mode 100644 test/908-gc-start-finish/info.txt create mode 100755 test/908-gc-start-finish/run create mode 100644 test/908-gc-start-finish/src/Main.java create mode 100644 test/908-gc-start-finish/src/art/Test908.java create mode 100644 test/909-attach-agent/attach.cc create mode 100644 test/909-attach-agent/attach.h create mode 100644 
test/909-attach-agent/disallow_debugging.cc create mode 100644 test/909-attach-agent/expected.txt create mode 100644 test/909-attach-agent/info.txt create mode 100644 test/909-attach-agent/interpreter-expected.patch create mode 100755 test/909-attach-agent/run create mode 100644 test/909-attach-agent/src-art/Main.java create mode 100644 test/910-methods/expected.txt create mode 100644 test/910-methods/info.txt create mode 100644 test/910-methods/methods.cc create mode 100755 test/910-methods/run create mode 100644 test/910-methods/src/Main.java create mode 100644 test/910-methods/src/art/Test910.java create mode 100644 test/911-get-stack-trace/expected-cts-version.txt create mode 100644 test/911-get-stack-trace/expected.txt create mode 100644 test/911-get-stack-trace/info.txt create mode 100755 test/911-get-stack-trace/run create mode 100644 test/911-get-stack-trace/src/Main.java create mode 100644 test/911-get-stack-trace/src/art/AllTraces.java create mode 100644 test/911-get-stack-trace/src/art/ControlData.java create mode 100644 test/911-get-stack-trace/src/art/Frames.java create mode 100644 test/911-get-stack-trace/src/art/OtherThread.java create mode 100644 test/911-get-stack-trace/src/art/PrintThread.java create mode 100644 test/911-get-stack-trace/src/art/Recurse.java create mode 100644 test/911-get-stack-trace/src/art/SameThread.java create mode 100644 test/911-get-stack-trace/src/art/Test911.java create mode 100644 test/911-get-stack-trace/src/art/ThreadListTraces.java create mode 100644 test/911-get-stack-trace/stack_trace.cc create mode 100644 test/912-classes/classes.cc create mode 100644 test/912-classes/classes_art.cc create mode 100644 test/912-classes/expected.txt create mode 100644 test/912-classes/info.txt create mode 100755 test/912-classes/run create mode 100644 test/912-classes/src-art/Main.java create mode 100644 test/912-classes/src-art/art/DexData.java create mode 100644 test/912-classes/src-art/art/Main.java create mode 100644 
test/912-classes/src-art/art/Test912.java create mode 100644 test/912-classes/src-art/art/Test912Art.java create mode 100644 test/913-heaps/expected.txt create mode 100644 test/913-heaps/heaps.cc create mode 100644 test/913-heaps/info.txt create mode 100755 test/913-heaps/run create mode 100644 test/913-heaps/src/Main.java create mode 120000 test/913-heaps/src/art/Main.java create mode 100644 test/913-heaps/src/art/Test913.java create mode 100644 test/914-hello-obsolescence/expected.txt create mode 100644 test/914-hello-obsolescence/info.txt create mode 100755 test/914-hello-obsolescence/run create mode 100644 test/914-hello-obsolescence/src/Main.java create mode 120000 test/914-hello-obsolescence/src/art/Redefinition.java create mode 100644 test/914-hello-obsolescence/src/art/Test914.java create mode 100644 test/915-obsolete-2/expected.txt create mode 100644 test/915-obsolete-2/info.txt create mode 100755 test/915-obsolete-2/run create mode 100644 test/915-obsolete-2/src/Main.java create mode 120000 test/915-obsolete-2/src/art/Redefinition.java create mode 100644 test/915-obsolete-2/src/art/Test915.java create mode 100644 test/916-obsolete-jit/expected.txt create mode 100644 test/916-obsolete-jit/info.txt create mode 100755 test/916-obsolete-jit/run create mode 100644 test/916-obsolete-jit/src/Main.java create mode 100644 test/916-obsolete-jit/src/Transform.java create mode 120000 test/916-obsolete-jit/src/art/Redefinition.java create mode 100644 test/917-fields-transformation/expected.txt create mode 100644 test/917-fields-transformation/info.txt create mode 100755 test/917-fields-transformation/run create mode 100644 test/917-fields-transformation/src/Main.java create mode 120000 test/917-fields-transformation/src/art/Redefinition.java create mode 100644 test/917-fields-transformation/src/art/Test917.java create mode 100644 test/918-fields/expected.txt create mode 100644 test/918-fields/fields.cc create mode 100644 test/918-fields/info.txt create mode 100755 
test/918-fields/run create mode 100644 test/918-fields/src/Main.java create mode 100644 test/918-fields/src/art/Test918.java create mode 100644 test/919-obsolete-fields/expected.txt create mode 100644 test/919-obsolete-fields/info.txt create mode 100755 test/919-obsolete-fields/run create mode 100644 test/919-obsolete-fields/src/Main.java create mode 120000 test/919-obsolete-fields/src/art/Redefinition.java create mode 100644 test/919-obsolete-fields/src/art/Test919.java create mode 100644 test/920-objects/expected.txt create mode 100644 test/920-objects/info.txt create mode 100644 test/920-objects/objects.cc create mode 100755 test/920-objects/run create mode 100644 test/920-objects/src/Main.java create mode 100644 test/920-objects/src/art/Test920.java create mode 100644 test/921-hello-failure/expected.txt create mode 100644 test/921-hello-failure/info.txt create mode 100755 test/921-hello-failure/run create mode 100644 test/921-hello-failure/src/CommonClassDefinition.java create mode 100644 test/921-hello-failure/src/DifferentAccess.java create mode 100644 test/921-hello-failure/src/FieldChange.java create mode 100644 test/921-hello-failure/src/Iface1.java create mode 100644 test/921-hello-failure/src/Iface2.java create mode 100644 test/921-hello-failure/src/Iface3.java create mode 100644 test/921-hello-failure/src/Iface4.java create mode 100644 test/921-hello-failure/src/Main.java create mode 100644 test/921-hello-failure/src/MethodChange.java create mode 100644 test/921-hello-failure/src/MissingField.java create mode 100644 test/921-hello-failure/src/MissingInterface.java create mode 100644 test/921-hello-failure/src/MissingMethod.java create mode 100644 test/921-hello-failure/src/MultiRedef.java create mode 100644 test/921-hello-failure/src/MultiRetrans.java create mode 100644 test/921-hello-failure/src/NewField.java create mode 100644 test/921-hello-failure/src/NewInterface.java create mode 100644 test/921-hello-failure/src/NewMethod.java create mode 100644 
test/921-hello-failure/src/NewName.java create mode 100644 test/921-hello-failure/src/ReorderInterface.java create mode 100644 test/921-hello-failure/src/Transform.java create mode 100644 test/921-hello-failure/src/Transform2.java create mode 100644 test/921-hello-failure/src/Transform3.java create mode 100644 test/921-hello-failure/src/Transform4.java create mode 100644 test/921-hello-failure/src/Transform5.java create mode 100644 test/921-hello-failure/src/Undefault.java create mode 100644 test/921-hello-failure/src/Unmodifiable.java create mode 100644 test/921-hello-failure/src/Verification.java create mode 120000 test/921-hello-failure/src/art/Redefinition.java create mode 100644 test/922-properties/expected.txt create mode 100644 test/922-properties/info.txt create mode 100644 test/922-properties/properties.cc create mode 100755 test/922-properties/run create mode 100644 test/922-properties/src/Main.java create mode 100644 test/922-properties/src/art/Test922.java create mode 100644 test/923-monitors/expected.txt create mode 100644 test/923-monitors/info.txt create mode 100644 test/923-monitors/monitors.cc create mode 100755 test/923-monitors/run create mode 100644 test/923-monitors/src/Main.java create mode 100644 test/923-monitors/src/art/Test923.java create mode 100644 test/924-threads/expected.txt create mode 100644 test/924-threads/info.txt create mode 100755 test/924-threads/run create mode 100644 test/924-threads/src/Main.java create mode 100644 test/924-threads/src/art/Test924.java create mode 100644 test/924-threads/threads.cc create mode 100644 test/925-threadgroups/expected.txt create mode 100644 test/925-threadgroups/info.txt create mode 100755 test/925-threadgroups/run create mode 100644 test/925-threadgroups/src/Main.java create mode 100644 test/925-threadgroups/src/art/Test925.java create mode 100644 test/925-threadgroups/threadgroups.cc create mode 100644 test/926-multi-obsolescence/expected.txt create mode 100644 
test/926-multi-obsolescence/info.txt create mode 100755 test/926-multi-obsolescence/run create mode 100644 test/926-multi-obsolescence/src/Main.java create mode 120000 test/926-multi-obsolescence/src/art/Redefinition.java create mode 100644 test/926-multi-obsolescence/src/art/Test926.java create mode 100644 test/927-timers/expected.txt create mode 100644 test/927-timers/info.txt create mode 100755 test/927-timers/run create mode 100644 test/927-timers/src/Main.java create mode 100644 test/927-timers/src/art/Test927.java create mode 100644 test/927-timers/timers.cc create mode 100644 test/928-jni-table/expected.txt create mode 100644 test/928-jni-table/info.txt create mode 100644 test/928-jni-table/jni_table.cc create mode 100755 test/928-jni-table/run create mode 100644 test/928-jni-table/src/Main.java create mode 100644 test/928-jni-table/src/art/Test928.java create mode 100644 test/929-search/expected.txt create mode 100644 test/929-search/info.txt create mode 100755 test/929-search/run create mode 100644 test/929-search/search.cc create mode 100644 test/929-search/src-ex/A.java create mode 100644 test/929-search/src/B.java create mode 100644 test/929-search/src/Main.java create mode 100644 test/930-hello-retransform/expected.txt create mode 100644 test/930-hello-retransform/info.txt create mode 100755 test/930-hello-retransform/run create mode 100644 test/930-hello-retransform/src/Main.java create mode 120000 test/930-hello-retransform/src/art/Redefinition.java create mode 100644 test/930-hello-retransform/src/art/Test930.java create mode 100644 test/931-agent-thread/agent_thread.cc create mode 100644 test/931-agent-thread/expected.txt create mode 100644 test/931-agent-thread/info.txt create mode 100755 test/931-agent-thread/run create mode 100644 test/931-agent-thread/src/Main.java create mode 100644 test/931-agent-thread/src/art/Test931.java create mode 100644 test/932-transform-saves/expected.txt create mode 100644 test/932-transform-saves/info.txt create 
mode 100755 test/932-transform-saves/run create mode 100644 test/932-transform-saves/src/Main.java create mode 120000 test/932-transform-saves/src/art/Redefinition.java create mode 100644 test/932-transform-saves/src/art/Test932.java create mode 100644 test/933-misc-events/expected.txt create mode 100644 test/933-misc-events/info.txt create mode 100644 test/933-misc-events/misc_events.cc create mode 100755 test/933-misc-events/run create mode 100644 test/933-misc-events/src/Main.java create mode 100644 test/933-misc-events/src/art/Test933.java create mode 100644 test/934-load-transform/expected.txt create mode 100644 test/934-load-transform/info.txt create mode 100755 test/934-load-transform/run create mode 100644 test/934-load-transform/src-ex/TestMain.java create mode 100644 test/934-load-transform/src-ex/Transform.java create mode 100644 test/934-load-transform/src/Main.java create mode 120000 test/934-load-transform/src/art/Redefinition.java create mode 100644 test/935-non-retransformable/expected.txt create mode 100644 test/935-non-retransformable/info.txt create mode 100755 test/935-non-retransformable/run create mode 100644 test/935-non-retransformable/src-ex/TestMain.java create mode 100644 test/935-non-retransformable/src-ex/Transform.java create mode 100644 test/935-non-retransformable/src/Main.java create mode 120000 test/935-non-retransformable/src/art/Redefinition.java create mode 100644 test/936-search-onload/expected.txt create mode 100644 test/936-search-onload/info.txt create mode 100755 test/936-search-onload/run create mode 100644 test/936-search-onload/search_onload.cc create mode 100644 test/936-search-onload/search_onload.h create mode 100644 test/936-search-onload/src-ex/A.java create mode 100644 test/936-search-onload/src/B.java create mode 100644 test/936-search-onload/src/Main.java create mode 100644 test/937-hello-retransform-package/expected.txt create mode 100644 test/937-hello-retransform-package/info.txt create mode 100755 
test/937-hello-retransform-package/run create mode 100644 test/937-hello-retransform-package/src/Main.java create mode 100644 test/937-hello-retransform-package/src/Transform.java create mode 120000 test/937-hello-retransform-package/src/art/Redefinition.java create mode 100644 test/938-load-transform-bcp/expected.txt create mode 100644 test/938-load-transform-bcp/info.txt create mode 100755 test/938-load-transform-bcp/run create mode 100644 test/938-load-transform-bcp/src-ex/TestMain.java create mode 100644 test/938-load-transform-bcp/src/Main.java create mode 120000 test/938-load-transform-bcp/src/art/Redefinition.java create mode 100644 test/939-hello-transformation-bcp/expected.txt create mode 100644 test/939-hello-transformation-bcp/info.txt create mode 100755 test/939-hello-transformation-bcp/run create mode 100644 test/939-hello-transformation-bcp/src/Main.java create mode 120000 test/939-hello-transformation-bcp/src/art/Redefinition.java create mode 100644 test/940-recursive-obsolete/expected.txt create mode 100644 test/940-recursive-obsolete/info.txt create mode 100755 test/940-recursive-obsolete/run create mode 100644 test/940-recursive-obsolete/src/Main.java create mode 120000 test/940-recursive-obsolete/src/art/Redefinition.java create mode 100644 test/940-recursive-obsolete/src/art/Test940.java create mode 100644 test/941-recurive-obsolete-jit/expected.txt create mode 100644 test/941-recurive-obsolete-jit/info.txt create mode 100755 test/941-recurive-obsolete-jit/run create mode 100644 test/941-recurive-obsolete-jit/src/Main.java create mode 100644 test/941-recurive-obsolete-jit/src/Transform.java create mode 120000 test/941-recurive-obsolete-jit/src/art/Redefinition.java create mode 100644 test/942-private-recursive/expected.txt create mode 100644 test/942-private-recursive/info.txt create mode 100755 test/942-private-recursive/run create mode 100644 test/942-private-recursive/src/Main.java create mode 120000 
test/942-private-recursive/src/art/Redefinition.java create mode 100644 test/942-private-recursive/src/art/Test942.java create mode 100644 test/943-private-recursive-jit/expected.txt create mode 100644 test/943-private-recursive-jit/info.txt create mode 100755 test/943-private-recursive-jit/run create mode 100644 test/943-private-recursive-jit/src/Main.java create mode 100644 test/943-private-recursive-jit/src/Transform.java create mode 120000 test/943-private-recursive-jit/src/art/Redefinition.java create mode 100644 test/944-transform-classloaders/expected.txt create mode 100644 test/944-transform-classloaders/info.txt create mode 100755 test/944-transform-classloaders/run create mode 100644 test/944-transform-classloaders/src/Main.java create mode 120000 test/944-transform-classloaders/src/art/Redefinition.java create mode 100644 test/944-transform-classloaders/src/art/Test944.java create mode 100644 test/945-obsolete-native/expected.txt create mode 100644 test/945-obsolete-native/info.txt create mode 100644 test/945-obsolete-native/obsolete_native.cc create mode 100755 test/945-obsolete-native/run create mode 100644 test/945-obsolete-native/src/Main.java create mode 120000 test/945-obsolete-native/src/art/Redefinition.java create mode 100644 test/945-obsolete-native/src/art/Test945.java create mode 100644 test/946-obsolete-throw/expected.txt create mode 100644 test/946-obsolete-throw/info.txt create mode 100755 test/946-obsolete-throw/run create mode 100644 test/946-obsolete-throw/src/Main.java create mode 120000 test/946-obsolete-throw/src/art/Redefinition.java create mode 100644 test/946-obsolete-throw/src/art/Test946.java create mode 100644 test/947-reflect-method/expected.txt create mode 100644 test/947-reflect-method/info.txt create mode 100755 test/947-reflect-method/run create mode 100644 test/947-reflect-method/src/Main.java create mode 120000 test/947-reflect-method/src/art/Redefinition.java create mode 100644 
test/947-reflect-method/src/art/Test947.java create mode 100755 test/948-change-annotations/build create mode 100644 test/948-change-annotations/expected.txt create mode 100644 test/948-change-annotations/info.txt create mode 100755 test/948-change-annotations/run create mode 100644 test/948-change-annotations/src/AddAnnotationsTest.java create mode 100644 test/948-change-annotations/src/ChangeAnnotationValues.java create mode 100644 test/948-change-annotations/src/Main.java create mode 100644 test/948-change-annotations/src/RemoveAnnotationsTest.java create mode 100644 test/948-change-annotations/src/TestCase.java create mode 100644 test/948-change-annotations/src/TestClassAnnotation1.java create mode 100644 test/948-change-annotations/src/TestClassAnnotation2.java create mode 100644 test/948-change-annotations/src/TestMethodAnnotation1.java create mode 100644 test/948-change-annotations/src/TestMethodAnnotation2.java create mode 100644 test/948-change-annotations/src/Transform.java create mode 120000 test/948-change-annotations/src/art/Redefinition.java create mode 100644 test/949-in-memory-transform/expected.txt create mode 100644 test/949-in-memory-transform/info.txt create mode 100755 test/949-in-memory-transform/run create mode 100644 test/949-in-memory-transform/src/Main.java create mode 120000 test/949-in-memory-transform/src/art/Redefinition.java create mode 100644 test/949-in-memory-transform/src/art/Test949.java create mode 100644 test/950-redefine-intrinsic/expected.txt create mode 100644 test/950-redefine-intrinsic/info.txt create mode 100755 test/950-redefine-intrinsic/run create mode 100644 test/950-redefine-intrinsic/src/Main.java create mode 100644 test/950-redefine-intrinsic/src/RedefinedLongIntrinsics.java create mode 120000 test/950-redefine-intrinsic/src/art/Redefinition.java create mode 100644 test/951-threaded-obsolete/expected.txt create mode 100644 test/951-threaded-obsolete/info.txt create mode 100755 test/951-threaded-obsolete/run create 
mode 100644 test/951-threaded-obsolete/src/Main.java create mode 120000 test/951-threaded-obsolete/src/art/Redefinition.java create mode 100644 test/951-threaded-obsolete/src/art/Test951.java create mode 100755 test/952-invoke-custom/build create mode 100644 test/952-invoke-custom/expected.txt create mode 100644 test/952-invoke-custom/info.txt create mode 100644 test/952-invoke-custom/src/Main.java create mode 100644 test/952-invoke-custom/src/TestBadBootstrapArguments.java create mode 100644 test/952-invoke-custom/src/TestBase.java create mode 100644 test/952-invoke-custom/src/TestDynamicBootstrapArguments.java create mode 100644 test/952-invoke-custom/src/TestInvocationKinds.java create mode 100644 test/952-invoke-custom/src/TestInvokeCustomWithConcurrentThreads.java create mode 100644 test/952-invoke-custom/src/TestLinkerMethodMinimalArguments.java create mode 100644 test/952-invoke-custom/src/TestLinkerMethodMultipleArgumentTypes.java create mode 100644 test/952-invoke-custom/src/TestLinkerMethodWithRange.java create mode 100644 test/952-invoke-custom/src/TestLinkerUnrelatedBSM.java create mode 100644 test/952-invoke-custom/src/TestReturnValues.java create mode 100644 test/952-invoke-custom/src/TestVariableArityLinkerMethod.java create mode 100644 test/952-invoke-custom/src/UnrelatedBSM.java create mode 100644 test/952-invoke-custom/util-src/annotations/BootstrapMethod.java create mode 100644 test/952-invoke-custom/util-src/annotations/CalledByIndy.java create mode 100644 test/952-invoke-custom/util-src/annotations/Constant.java create mode 100644 test/952-invoke-custom/util-src/transformer/IndyTransformer.java create mode 100755 test/953-invoke-polymorphic-compiler/build create mode 100644 test/953-invoke-polymorphic-compiler/expected.txt create mode 100644 test/953-invoke-polymorphic-compiler/info.txt create mode 100644 test/953-invoke-polymorphic-compiler/src/Main.java create mode 100755 test/954-invoke-polymorphic-verifier/build create mode 100755 
test/954-invoke-polymorphic-verifier/check create mode 100644 test/954-invoke-polymorphic-verifier/expected.txt create mode 100644 test/954-invoke-polymorphic-verifier/info.txt create mode 100644 test/954-invoke-polymorphic-verifier/smali/BadThis.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/BetterFakeSignaturePolymorphic.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/FakeSignaturePolymorphic.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/Main.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/MethodHandleNotInvoke.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/MethodHandleToString.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/NonReference.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/Subclass.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/TooFewArguments.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/TooManyArguments.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/Unresolved.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/VarHandleHappyAccessors.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/VarHandleUnhappyAccessors.smali create mode 100644 test/954-invoke-polymorphic-verifier/smali/VarHandleUnknownAccessor.smali create mode 100755 test/955-methodhandles-smali/build create mode 100644 test/955-methodhandles-smali/expected.txt create mode 100644 test/955-methodhandles-smali/info.txt create mode 100644 test/955-methodhandles-smali/smali/Main.smali create mode 100755 test/956-methodhandles/build create mode 100644 test/956-methodhandles/expected.txt create mode 100644 test/956-methodhandles/info.txt create mode 100644 test/956-methodhandles/src/Main.java create mode 100644 test/956-methodhandles/src/other/Chatty.java create mode 100755 test/957-methodhandle-transforms/build create mode 100644 
test/957-methodhandle-transforms/expected.txt create mode 100644 test/957-methodhandle-transforms/info.txt create mode 100644 test/957-methodhandle-transforms/src/Main.java create mode 100755 test/958-methodhandle-stackframe/build create mode 100644 test/958-methodhandle-stackframe/expected.txt create mode 100644 test/958-methodhandle-stackframe/info.txt create mode 100644 test/958-methodhandle-stackframe/src-art/Main.java create mode 100644 test/959-invoke-polymorphic-accessors/build create mode 100644 test/959-invoke-polymorphic-accessors/expected.txt create mode 100644 test/959-invoke-polymorphic-accessors/info.txt create mode 100644 test/959-invoke-polymorphic-accessors/src/Main.java create mode 100755 test/960-default-smali/build create mode 100644 test/960-default-smali/expected.txt create mode 100644 test/960-default-smali/info.txt create mode 100644 test/960-default-smali/src/A.java create mode 100644 test/960-default-smali/src/Attendant.java create mode 100644 test/960-default-smali/src/B.java create mode 100644 test/960-default-smali/src/C.java create mode 100644 test/960-default-smali/src/D.java create mode 100644 test/960-default-smali/src/E.java create mode 100644 test/960-default-smali/src/Extension.java create mode 100644 test/960-default-smali/src/F.java create mode 100644 test/960-default-smali/src/Foo.java create mode 100644 test/960-default-smali/src/Foo2.java create mode 100644 test/960-default-smali/src/Foo3.java create mode 100644 test/960-default-smali/src/Fooer.java create mode 100644 test/960-default-smali/src/G.java create mode 100644 test/960-default-smali/src/Greeter.java create mode 100644 test/960-default-smali/src/Greeter2.java create mode 100644 test/960-default-smali/src/Greeter3.java create mode 100644 test/960-default-smali/src/H.java create mode 100644 test/960-default-smali/src/I.java create mode 100644 test/960-default-smali/src/J.java create mode 100644 test/960-default-smali/src/K.java create mode 100644 
test/960-default-smali/src/L.java create mode 100644 test/960-default-smali/src/M.java create mode 100644 test/960-default-smali/src/N.java create mode 100644 test/960-default-smali/src/O.java create mode 100644 test/960-default-smali/src/P.java create mode 100644 test/960-default-smali/src/Q.java create mode 100644 test/960-default-smali/src/classes.xml create mode 100644 test/960-default-smali/src2/Foo.java create mode 100644 test/960-default-smali/src2/Foo3.java create mode 100755 test/961-default-iface-resolution-gen/build create mode 100644 test/961-default-iface-resolution-gen/expected.txt create mode 100644 test/961-default-iface-resolution-gen/info.txt create mode 100755 test/961-default-iface-resolution-gen/run create mode 100755 test/961-default-iface-resolution-gen/util-src/generate_java.py create mode 100644 test/962-iface-static/build create mode 100644 test/962-iface-static/expected.txt create mode 100644 test/962-iface-static/info.txt create mode 100644 test/962-iface-static/src/Displayer.java create mode 100644 test/962-iface-static/src/Iface.java create mode 100644 test/962-iface-static/src/Main.java create mode 100644 test/963-default-range-smali/expected.txt create mode 100644 test/963-default-range-smali/info.txt create mode 100644 test/963-default-range-smali/src/A.java create mode 100644 test/963-default-range-smali/src/Iface.java create mode 100644 test/963-default-range-smali/src/Main.java create mode 100755 test/964-default-iface-init-gen/build create mode 100644 test/964-default-iface-init-gen/expected.txt create mode 100644 test/964-default-iface-init-gen/info.txt create mode 100644 test/964-default-iface-init-gen/src/Displayer.java create mode 100755 test/964-default-iface-init-gen/util-src/generate_java.py create mode 100644 test/965-default-verify/expected.txt create mode 100644 test/965-default-verify/info.txt create mode 100644 test/965-default-verify/src/Iface.java create mode 100644 test/965-default-verify/src/Main.java create mode 
100644 test/965-default-verify/src/Statics.java create mode 100644 test/965-default-verify/src2/Statics.java create mode 100644 test/966-default-conflict/expected.txt create mode 100644 test/966-default-conflict/info.txt create mode 100644 test/966-default-conflict/src/Iface.java create mode 100644 test/966-default-conflict/src/Iface2.java create mode 100644 test/966-default-conflict/src/Main.java create mode 100644 test/966-default-conflict/src2/Iface2.java create mode 100644 test/967-default-ame/expected.txt create mode 100644 test/967-default-ame/info.txt create mode 100644 test/967-default-ame/src/Iface.java create mode 100644 test/967-default-ame/src/Iface2.java create mode 100644 test/967-default-ame/src/Iface3.java create mode 100644 test/967-default-ame/src/Main.java create mode 100644 test/967-default-ame/src2/Iface.java create mode 100644 test/967-default-ame/src2/Iface2.java create mode 100644 test/967-default-ame/src2/Iface3.java create mode 100755 test/968-default-partial-compile-gen/build create mode 100644 test/968-default-partial-compile-gen/expected.txt create mode 100644 test/968-default-partial-compile-gen/info.txt create mode 100755 test/968-default-partial-compile-gen/util-src/generate_java.py create mode 100755 test/968-default-partial-compile-gen/util-src/generate_smali.py create mode 100755 test/969-iface-super/build create mode 100644 test/969-iface-super/expected.txt create mode 100644 test/969-iface-super/info.txt create mode 100644 test/969-iface-super/src/A.java create mode 100644 test/969-iface-super/src/B.java create mode 100644 test/969-iface-super/src/C.java create mode 100644 test/969-iface-super/src/D.java create mode 100644 test/969-iface-super/src/E.java create mode 100644 test/969-iface-super/src/F.java create mode 100644 test/969-iface-super/src/G.java create mode 100644 test/969-iface-super/src/H.java create mode 100644 test/969-iface-super/src/Iface.java create mode 100644 test/969-iface-super/src/Iface2.java create mode 
100644 test/969-iface-super/src/Iface3.java create mode 100644 test/969-iface-super/src/classes.xml create mode 100755 test/970-iface-super-resolution-gen/build create mode 100644 test/970-iface-super-resolution-gen/expected.txt create mode 100644 test/970-iface-super-resolution-gen/info.txt create mode 100755 test/970-iface-super-resolution-gen/util-src/generate_java.py create mode 100755 test/970-iface-super-resolution-gen/util-src/generate_smali.py create mode 100755 test/971-iface-super/build create mode 100644 test/971-iface-super/expected.txt create mode 100644 test/971-iface-super/info.txt create mode 100755 test/971-iface-super/util-src/generate_java.py create mode 100755 test/971-iface-super/util-src/generate_smali.py create mode 100644 test/972-default-imt-collision/expected.txt create mode 100644 test/972-default-imt-collision/info.txt create mode 100644 test/972-default-imt-collision/smali/Iface1.smali create mode 100644 test/972-default-imt-collision/smali/Iface2.smali create mode 100644 test/972-default-imt-collision/smali/Klass.smali create mode 100644 test/972-default-imt-collision/src/Main.java create mode 100644 test/972-iface-super-multidex/expected.txt create mode 100644 test/972-iface-super-multidex/info.txt create mode 100644 test/972-iface-super-multidex/smali-multidex/conflictinterface.smali create mode 100644 test/972-iface-super-multidex/smali-multidex/oneconflict.smali create mode 100644 test/972-iface-super-multidex/smali-multidex/superinterface.smali create mode 100644 test/972-iface-super-multidex/smali-multidex/twoconflict.smali create mode 100644 test/972-iface-super-multidex/smali/concreteclass.smali create mode 100644 test/972-iface-super-multidex/src/Main.java create mode 100644 test/973-default-multidex/expected.txt create mode 100644 test/973-default-multidex/info.txt create mode 100644 test/973-default-multidex/smali-multidex/iface.smali create mode 100644 test/973-default-multidex/smali/concreteclass.smali create mode 100644 
test/973-default-multidex/src/Main.java create mode 100644 test/974-verify-interface-super/expected.txt create mode 100644 test/974-verify-interface-super/info.txt create mode 100644 test/974-verify-interface-super/smali/base.smali create mode 100644 test/974-verify-interface-super/smali/iface.smali create mode 100644 test/974-verify-interface-super/smali/main.smali create mode 100755 test/975-iface-private/build create mode 100644 test/975-iface-private/expected.txt create mode 100644 test/975-iface-private/info.txt create mode 100644 test/975-iface-private/smali/Iface.smali create mode 100644 test/975-iface-private/smali/Main.smali create mode 100644 test/976-conflict-no-methods/expected.txt create mode 100644 test/976-conflict-no-methods/info.txt create mode 100644 test/976-conflict-no-methods/smali/Iface.smali create mode 100644 test/976-conflict-no-methods/smali/Main.smali create mode 100644 test/976-conflict-no-methods/smali/NoMethods.smali create mode 100755 test/978-virtual-interface/build create mode 100644 test/978-virtual-interface/expected.txt create mode 100644 test/978-virtual-interface/info.txt create mode 100644 test/978-virtual-interface/smali/Iface.smali create mode 100644 test/978-virtual-interface/smali/Main.smali create mode 100644 test/978-virtual-interface/smali/Subtype.smali create mode 100644 test/978-virtual-interface/smali/Target.smali create mode 100755 test/979-const-method-handle/build create mode 100644 test/979-const-method-handle/expected.txt create mode 100644 test/979-const-method-handle/info.txt create mode 100644 test/979-const-method-handle/src/Main.java create mode 100644 test/979-const-method-handle/util-src/annotations/ConstantMethodHandle.java create mode 100644 test/979-const-method-handle/util-src/annotations/ConstantMethodType.java create mode 100644 test/979-const-method-handle/util-src/transformer/ConstantTransformer.java create mode 100755 test/980-redefine-object/check create mode 100644 
test/980-redefine-object/expected.txt create mode 100644 test/980-redefine-object/info.txt create mode 100644 test/980-redefine-object/redef_object.cc create mode 100755 test/980-redefine-object/run create mode 100644 test/980-redefine-object/src-ex/TestWatcher.java create mode 100644 test/980-redefine-object/src/Main.java create mode 100644 test/980-redefine-object/src/Transform.java create mode 100644 test/981-dedup-original-dex/expected.txt create mode 100644 test/981-dedup-original-dex/info.txt create mode 100755 test/981-dedup-original-dex/run create mode 100644 test/981-dedup-original-dex/src-art/Main.java create mode 100644 test/981-dedup-original-dex/src-art/art/Redefinition.java create mode 100644 test/981-dedup-original-dex/src-art/art/Test981.java create mode 100644 test/982-ok-no-retransform/expected.txt create mode 100644 test/982-ok-no-retransform/info.txt create mode 100755 test/982-ok-no-retransform/run create mode 100644 test/982-ok-no-retransform/src/Main.java create mode 120000 test/982-ok-no-retransform/src/art/Redefinition.java create mode 100644 test/982-ok-no-retransform/src/art/Test982.java create mode 100644 test/983-source-transform-verify/expected.txt create mode 100644 test/983-source-transform-verify/info.txt create mode 100755 test/983-source-transform-verify/run create mode 100644 test/983-source-transform-verify/source_transform.cc create mode 100644 test/983-source-transform-verify/source_transform.h create mode 100644 test/983-source-transform-verify/source_transform_art.cc create mode 100644 test/983-source-transform-verify/source_transform_slicer.cc create mode 100644 test/983-source-transform-verify/src/Main.java create mode 120000 test/983-source-transform-verify/src/art/Redefinition.java create mode 100644 test/983-source-transform-verify/src/art/Test983.java create mode 100644 test/984-obsolete-invoke/expected.txt create mode 100644 test/984-obsolete-invoke/info.txt create mode 100644 
test/984-obsolete-invoke/obsolete_invoke.cc create mode 100755 test/984-obsolete-invoke/run create mode 100644 test/984-obsolete-invoke/src/Main.java create mode 120000 test/984-obsolete-invoke/src/art/Redefinition.java create mode 100644 test/984-obsolete-invoke/src/art/Test984.java create mode 100644 test/985-re-obsolete/expected.txt create mode 100644 test/985-re-obsolete/info.txt create mode 100755 test/985-re-obsolete/run create mode 100644 test/985-re-obsolete/src/Main.java create mode 120000 test/985-re-obsolete/src/art/Redefinition.java create mode 100644 test/985-re-obsolete/src/art/Test985.java create mode 100644 test/986-native-method-bind/expected.txt create mode 100644 test/986-native-method-bind/info.txt create mode 100644 test/986-native-method-bind/native_bind.cc create mode 100755 test/986-native-method-bind/run create mode 100644 test/986-native-method-bind/src/Main.java create mode 100644 test/986-native-method-bind/src/art/Test986.java create mode 100644 test/987-agent-bind/agent_bind.cc create mode 100644 test/987-agent-bind/expected.txt create mode 100644 test/987-agent-bind/info.txt create mode 100755 test/987-agent-bind/run create mode 100644 test/987-agent-bind/src/Main.java create mode 100644 test/987-agent-bind/src/art/Test987.java create mode 100644 test/988-method-trace/expected.txt create mode 100755 test/988-method-trace/gen_srcs.py create mode 100644 test/988-method-trace/info.txt create mode 100755 test/988-method-trace/run create mode 100644 test/988-method-trace/src/Main.java create mode 100644 test/988-method-trace/src/art/Test988.java create mode 100644 test/988-method-trace/src/art/Test988Intrinsics.java create mode 120000 test/988-method-trace/src/art/Trace.java create mode 100644 test/988-method-trace/trace_fib.cc create mode 100644 test/989-method-trace-throw/expected.txt create mode 100644 test/989-method-trace-throw/info.txt create mode 100644 test/989-method-trace-throw/method_trace.cc create mode 100755 
test/989-method-trace-throw/run create mode 100644 test/989-method-trace-throw/src/Main.java create mode 100644 test/989-method-trace-throw/src/art/Test989.java create mode 120000 test/989-method-trace-throw/src/art/Trace.java create mode 100644 test/990-field-trace/expected.txt create mode 100644 test/990-field-trace/info.txt create mode 100755 test/990-field-trace/run create mode 100644 test/990-field-trace/src/Main.java create mode 100644 test/990-field-trace/src/art/Test990.java create mode 120000 test/990-field-trace/src/art/Trace.java create mode 100644 test/991-field-trace-2/expected.txt create mode 100644 test/991-field-trace-2/field_trace.cc create mode 100644 test/991-field-trace-2/info.txt create mode 100755 test/991-field-trace-2/run create mode 100644 test/991-field-trace-2/src/Main.java create mode 100644 test/991-field-trace-2/src/art/Test991.java create mode 120000 test/991-field-trace-2/src/art/Trace.java create mode 100644 test/992-source-data/expected.txt create mode 100644 test/992-source-data/info.txt create mode 100755 test/992-source-data/run create mode 100644 test/992-source-data/source_file.cc create mode 100644 test/992-source-data/src/Main.java create mode 100644 test/992-source-data/src/art/Target2.java create mode 100644 test/992-source-data/src/art/Test992.java create mode 100644 test/993-breakpoints/breakpoints.cc create mode 100644 test/993-breakpoints/expected.txt create mode 100644 test/993-breakpoints/info.txt create mode 100755 test/993-breakpoints/run create mode 100644 test/993-breakpoints/src/Main.java create mode 120000 test/993-breakpoints/src/art/Breakpoint.java create mode 100644 test/993-breakpoints/src/art/Test993.java create mode 100644 test/994-breakpoint-line/expected.txt create mode 100644 test/994-breakpoint-line/info.txt create mode 100755 test/994-breakpoint-line/run create mode 100644 test/994-breakpoint-line/src/Main.java create mode 120000 test/994-breakpoint-line/src/art/Breakpoint.java create mode 100644 
test/994-breakpoint-line/src/art/Test994.java create mode 100644 test/995-breakpoints-throw/expected.txt create mode 100644 test/995-breakpoints-throw/info.txt create mode 100755 test/995-breakpoints-throw/run create mode 100644 test/995-breakpoints-throw/src/Main.java create mode 120000 test/995-breakpoints-throw/src/art/Breakpoint.java create mode 100644 test/995-breakpoints-throw/src/art/Test995.java create mode 100644 test/996-breakpoint-obsolete/expected.txt create mode 100644 test/996-breakpoint-obsolete/info.txt create mode 100644 test/996-breakpoint-obsolete/obsolete_breakpoints.cc create mode 100755 test/996-breakpoint-obsolete/run create mode 100644 test/996-breakpoint-obsolete/src/Main.java create mode 120000 test/996-breakpoint-obsolete/src/art/Breakpoint.java create mode 120000 test/996-breakpoint-obsolete/src/art/Redefinition.java create mode 100644 test/996-breakpoint-obsolete/src/art/Test996.java create mode 100644 test/997-single-step/expected.txt create mode 100644 test/997-single-step/info.txt create mode 100755 test/997-single-step/run create mode 100644 test/997-single-step/src/Main.java create mode 120000 test/997-single-step/src/art/Breakpoint.java create mode 100644 test/997-single-step/src/art/Test997.java create mode 120000 test/997-single-step/src/art/Trace.java create mode 100644 test/998-redefine-use-after-free/expected.txt create mode 100644 test/998-redefine-use-after-free/info.txt create mode 100755 test/998-redefine-use-after-free/run create mode 100644 test/998-redefine-use-after-free/src-ex/DexCacheSmash.java create mode 100644 test/998-redefine-use-after-free/src-ex/art/Redefinition.java create mode 100644 test/998-redefine-use-after-free/src/Main.java create mode 100644 test/999-redefine-hiddenapi/build create mode 100644 test/999-redefine-hiddenapi/expected.txt create mode 100644 test/999-redefine-hiddenapi/hiddenapi-flags.csv create mode 100644 test/999-redefine-hiddenapi/info.txt create mode 100755 
test/999-redefine-hiddenapi/run create mode 100644 test/999-redefine-hiddenapi/src-ex/Test999.java create mode 100644 test/999-redefine-hiddenapi/src-redefine/art/Test999.java create mode 100755 test/999-redefine-hiddenapi/src-redefine/gen.sh create mode 100644 test/999-redefine-hiddenapi/src/Main.java create mode 120000 test/999-redefine-hiddenapi/src/art/Redefinition.java create mode 100644 test/AbstractMethod/AbstractClass.java create mode 100644 test/AllFields/AllFields.java create mode 100644 test/AllFields/AllFieldsSub.java create mode 100644 test/AllFields/AllFieldsUnrelated.java create mode 100644 test/Android.bp create mode 100644 test/Android.run-test.mk create mode 100644 test/DefaultMethods/IterableBase.java create mode 100644 test/Dex2oatVdexTestDex/Dex2oatVdexTestDex.java create mode 100644 test/DexToDexDecompiler/Main.java create mode 100644 test/ErroneousA/ErroneousA.java create mode 100644 test/ErroneousB/ErroneousB.java create mode 100644 test/ErroneousInit/ErroneousInit.java create mode 100644 test/ExceptionHandle/ExceptionHandle.java create mode 100644 test/Extension1/ExtensionClass1.java create mode 100644 test/Extension2/ExtensionClass2.java create mode 100644 test/ForClassLoaderA/Classes.java create mode 100644 test/ForClassLoaderB/Classes.java create mode 100644 test/ForClassLoaderC/Classes.java create mode 100644 test/ForClassLoaderD/Classes.java create mode 100644 test/GetMethodSignature/GetMethodSignature.java create mode 100644 test/HiddenApi/AbstractPackageClass.java create mode 100644 test/HiddenApi/Main.java create mode 100644 test/HiddenApi/PackageClass.java create mode 100644 test/HiddenApi/PublicInterface.java create mode 100644 test/HiddenApiSignatures/Class1.java create mode 100644 test/HiddenApiSignatures/Class12.java create mode 100644 test/HiddenApiSignatures/Class2.java create mode 100644 test/HiddenApiSignatures/Class3.java create mode 100644 test/HiddenApiSignatures/Interface.java create mode 100644 
test/HiddenApiStubs/HiddenApi create mode 100644 test/HiddenApiStubs/PublicInterface.java create mode 100644 test/IMTA/Interfaces.java create mode 100644 test/IMTB/Interfaces.java create mode 100644 test/ImageLayoutA/ImageLayoutA.java create mode 100644 test/ImageLayoutB/ImageLayoutB.java create mode 100644 test/Instrumentation/Instrumentation.java create mode 100644 test/Interfaces/Interfaces.java create mode 100644 test/Lookup/A.java create mode 100644 test/Lookup/AB.java create mode 100644 test/Lookup/C.java create mode 100644 test/Main/Main.java create mode 100644 test/Main/empty.dex create mode 100644 test/ManyMethods/ManyMethods.java create mode 100644 test/MethodTypes/MethodTypes.java create mode 100644 test/MultiDex/Main.java create mode 100644 test/MultiDex/Second.java create mode 100644 test/MultiDex/main.jpp create mode 100644 test/MultiDex/main.list create mode 100644 test/MultiDexModifiedSecondary/Main.java create mode 100644 test/MultiDexModifiedSecondary/README.txt create mode 100644 test/MultiDexModifiedSecondary/Second.java create mode 100644 test/MultiDexModifiedSecondary/main.jpp create mode 100644 test/MultiDexModifiedSecondary/main.list create mode 100644 test/MyClass/MyClass.java create mode 100644 test/MyClassNatives/MyClassNatives.java create mode 100644 test/Nested/Nested.java create mode 100644 test/NonStaticLeafMethods/NonStaticLeafMethods.java create mode 100644 test/Packages/Package1.java create mode 100644 test/Packages/Package2.java create mode 100644 test/ProfileTestMultiDex/Main.java create mode 100644 test/ProfileTestMultiDex/Second.java create mode 100644 test/ProfileTestMultiDex/main.jpp create mode 100644 test/ProfileTestMultiDex/main.list create mode 100644 test/ProtoCompare/ProtoCompare.java create mode 100644 test/ProtoCompare2/ProtoCompare2.java create mode 100644 test/README.chroot.md create mode 100644 test/README.md create mode 100644 test/StaticLeafMethods/StaticLeafMethods.java create mode 100644 
test/Statics/Statics.java create mode 100644 test/StaticsFromCode/StaticsFromCode.java create mode 100644 test/StringLiterals/StringLiterals.java create mode 100644 test/Transaction/InstanceFieldsTest.java create mode 100644 test/Transaction/StaticArrayFieldsTest.java create mode 100644 test/Transaction/StaticFieldsTest.java create mode 100644 test/Transaction/Transaction.java create mode 100644 test/VerifierDeps/Iface.smali create mode 100644 test/VerifierDeps/Main.smali create mode 100644 test/VerifierDeps/MyClassExtendingInterface.smali create mode 100644 test/VerifierDeps/MyClassWithNoSuper.smali create mode 100644 test/VerifierDeps/MyClassWithNoSuperButFailures.smali create mode 100644 test/VerifierDeps/MyDOMResult.smali create mode 100644 test/VerifierDeps/MyDocument.smali create mode 100644 test/VerifierDeps/MyErroneousTimeZone.smali create mode 100644 test/VerifierDeps/MyResult.smali create mode 100644 test/VerifierDeps/MySSLSocket.smali create mode 100644 test/VerifierDeps/MySimpleTimeZone.smali create mode 100644 test/VerifierDeps/MySocketTimeoutException.smali create mode 100644 test/VerifierDeps/MySub1SoftVerificationFailure.smali create mode 100644 test/VerifierDeps/MySub2SoftVerificationFailure.smali create mode 100644 test/VerifierDeps/MyThread.smali create mode 100644 test/VerifierDeps/MyThreadSet.smali create mode 100644 test/VerifierDeps/MyVerificationFailure.smali create mode 100644 test/VerifierDeps/SocketTimeoutException.smali create mode 100644 test/VerifierDepsMulti/MySoftVerificationFailure.smali create mode 100644 test/VerifySoftFailDuringClinit/ClassToInitialize.smali create mode 100644 test/VerifySoftFailDuringClinit/VerifySoftFail.smali create mode 100644 test/XandY/X.java create mode 100644 test/XandY/Y.java create mode 100644 test/common/runtime_state.cc create mode 100644 test/common/stack_inspect.cc create mode 100644 test/dexdump/all.dex create mode 100644 test/dexdump/all.lst create mode 100644 test/dexdump/all.txt create mode 
100644 test/dexdump/all.xml create mode 100755 test/dexdump/bytecodes.dex create mode 100644 test/dexdump/bytecodes.lst create mode 100644 test/dexdump/bytecodes.txt create mode 100755 test/dexdump/bytecodes.xml create mode 100755 test/dexdump/checkers.dex create mode 100644 test/dexdump/checkers.lst create mode 100644 test/dexdump/checkers.txt create mode 100755 test/dexdump/checkers.xml create mode 100644 test/dexdump/const-method-handle.dex create mode 100644 test/dexdump/const-method-handle.lst create mode 100644 test/dexdump/const-method-handle.txt create mode 100644 test/dexdump/const-method-handle.xml create mode 100644 test/dexdump/invoke-custom.dex create mode 100644 test/dexdump/invoke-custom.lst create mode 100644 test/dexdump/invoke-custom.txt create mode 100644 test/dexdump/invoke-custom.xml create mode 100644 test/dexdump/invoke-polymorphic.dex create mode 100644 test/dexdump/invoke-polymorphic.lst create mode 100644 test/dexdump/invoke-polymorphic.txt create mode 100644 test/dexdump/invoke-polymorphic.xml create mode 100755 test/dexdump/run-all-tests create mode 100644 test/dexdump/staticfields.dex create mode 100644 test/dexdump/staticfields.lst create mode 100644 test/dexdump/staticfields.txt create mode 100644 test/dexdump/staticfields.xml create mode 100644 test/dexdump/values.dex create mode 100644 test/dexdump/values.lst create mode 100644 test/dexdump/values.txt create mode 100644 test/dexdump/values.xml create mode 100755 test/etc/default-build create mode 100755 test/etc/default-check create mode 100755 test/etc/default-run create mode 100755 test/etc/run-test-jar create mode 100644 test/jvmti-common/Breakpoint.java create mode 100644 test/jvmti-common/Exceptions.java create mode 100644 test/jvmti-common/FramePop.java create mode 100644 test/jvmti-common/Locals.java create mode 100644 test/jvmti-common/Main.java create mode 100644 test/jvmti-common/Monitors.java create mode 100644 test/jvmti-common/NonStandardExit.java create mode 100644 
test/jvmti-common/Redefinition.java create mode 100644 test/jvmti-common/StackTrace.java create mode 100644 test/jvmti-common/SuspendEvents.java create mode 100644 test/jvmti-common/Suspension.java create mode 100644 test/jvmti-common/Threads.java create mode 100644 test/jvmti-common/Trace.java create mode 100644 test/knownfailures.json create mode 100755 test/run-test create mode 100644 test/testrunner/device_config.py create mode 100644 test/testrunner/env.py create mode 100755 test/testrunner/run_build_test_target.py create mode 100644 test/testrunner/target_config.py create mode 100755 test/testrunner/testrunner.py create mode 100644 test/ti-agent/agent_common.cc create mode 100644 test/ti-agent/agent_startup.cc create mode 100644 test/ti-agent/breakpoint_helper.cc create mode 100644 test/ti-agent/common_helper.cc create mode 100644 test/ti-agent/common_helper.h create mode 100644 test/ti-agent/common_load.cc create mode 100644 test/ti-agent/early_return_helper.cc create mode 100644 test/ti-agent/exceptions_helper.cc create mode 100644 test/ti-agent/frame_pop_helper.cc create mode 100644 test/ti-agent/jni_binder.cc create mode 100644 test/ti-agent/jni_binder.h create mode 100644 test/ti-agent/jni_helper.h create mode 100644 test/ti-agent/jvmti_helper.cc create mode 100644 test/ti-agent/jvmti_helper.h create mode 100644 test/ti-agent/locals_helper.cc create mode 100644 test/ti-agent/monitors_helper.cc create mode 100644 test/ti-agent/redefinition_helper.cc create mode 100644 test/ti-agent/scoped_local_ref.h create mode 100644 test/ti-agent/scoped_primitive_array.h create mode 100644 test/ti-agent/scoped_utf_chars.h create mode 100644 test/ti-agent/stack_trace_helper.cc create mode 100644 test/ti-agent/suspend_event_helper.cc create mode 100644 test/ti-agent/suspend_event_helper.h create mode 100644 test/ti-agent/suspension_helper.cc create mode 100644 test/ti-agent/test_env.cc create mode 100644 test/ti-agent/test_env.h create mode 100644 
test/ti-agent/threads_helper.cc create mode 100644 test/ti-agent/ti_macros.h create mode 100644 test/ti-agent/ti_utf.h create mode 100644 test/ti-agent/trace_helper.cc create mode 100644 test/ti-stress/stress.cc create mode 100755 test/utils/get-device-isa create mode 100755 test/utils/get-device-test-native-lib-path create mode 100755 test/utils/python/generate_java_main.py create mode 100644 test/utils/python/testgen/mixins.py create mode 100644 test/utils/python/testgen/utils.py create mode 100644 tools/Android.bp create mode 100755 tools/add_package_property.sh create mode 100644 tools/ahat/Android.bp create mode 100644 tools/ahat/Android.mk create mode 100644 tools/ahat/README.txt create mode 100644 tools/ahat/TEST_MAPPING create mode 100755 tools/ahat/ahat create mode 100644 tools/ahat/ahat-tests.xml create mode 100644 tools/ahat/etc/L.hprof create mode 100644 tools/ahat/etc/O.hprof create mode 100644 tools/ahat/etc/README.txt create mode 100644 tools/ahat/etc/RI.hprof create mode 100644 tools/ahat/etc/ahat-tests.mf create mode 100644 tools/ahat/etc/ahat.mf create mode 100644 tools/ahat/etc/ahat_api.txt create mode 100644 tools/ahat/etc/ahat_api_msg.txt create mode 100644 tools/ahat/etc/ahat_removed_api.txt create mode 100644 tools/ahat/etc/hprofdump.py create mode 100644 tools/ahat/etc/style.css create mode 100644 tools/ahat/etc/test-dump.pro create mode 100644 tools/ahat/src/main/com/android/ahat/AhatHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/AhatHttpHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/AsciiProgress.java create mode 100644 tools/ahat/src/main/com/android/ahat/BitmapHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/Column.java create mode 100644 tools/ahat/src/main/com/android/ahat/Doc.java create mode 100644 tools/ahat/src/main/com/android/ahat/DocString.java create mode 100644 tools/ahat/src/main/com/android/ahat/DominatedList.java create mode 100644 
tools/ahat/src/main/com/android/ahat/HeapTable.java create mode 100644 tools/ahat/src/main/com/android/ahat/HtmlDoc.java create mode 100644 tools/ahat/src/main/com/android/ahat/HtmlEscaper.java create mode 100644 tools/ahat/src/main/com/android/ahat/Main.java create mode 100644 tools/ahat/src/main/com/android/ahat/Menu.java create mode 100644 tools/ahat/src/main/com/android/ahat/ObjectHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/ObjectsHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/OverviewHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/Query.java create mode 100644 tools/ahat/src/main/com/android/ahat/RootedHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/SiteHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/SitePrinter.java create mode 100644 tools/ahat/src/main/com/android/ahat/SizeTable.java create mode 100644 tools/ahat/src/main/com/android/ahat/StaticHandler.java create mode 100644 tools/ahat/src/main/com/android/ahat/SubsetSelector.java create mode 100644 tools/ahat/src/main/com/android/ahat/Summarizer.java create mode 100644 tools/ahat/src/main/com/android/ahat/dominators/Dominators.java create mode 100644 tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/AhatHeap.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java create mode 100644 
tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Diff.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/DiffFields.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Diffable.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Field.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/FieldValue.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Instances.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Parser.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/PathElement.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Reference.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/RootType.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Site.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Size.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/SkipNullsIterator.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Sort.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Type.java create mode 100644 tools/ahat/src/main/com/android/ahat/heapdump/Value.java create mode 100644 tools/ahat/src/main/com/android/ahat/progress/NullProgress.java create mode 100644 tools/ahat/src/main/com/android/ahat/progress/Progress.java create mode 100644 tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java 
create mode 100644 tools/ahat/src/ri-test-dump/DumpedStuff.java create mode 100644 tools/ahat/src/ri-test-dump/Main.java create mode 100644 tools/ahat/src/test-dump/DumpedStuff.java create mode 100644 tools/ahat/src/test-dump/Main.java create mode 100644 tools/ahat/src/test-dump/SuperDumpedStuff.java create mode 100644 tools/ahat/src/test-dump/android/os/Binder.java create mode 100644 tools/ahat/src/test-dump/android/os/BinderProxy.java create mode 100644 tools/ahat/src/test-dump/android/os/IBinder.java create mode 100644 tools/ahat/src/test/com/android/ahat/AhatTestSuite.java create mode 100644 tools/ahat/src/test/com/android/ahat/DiffFieldsTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/DiffTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/DominatorsTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/HtmlEscaperTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/InstanceTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/NativeAllocationTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/ObjectHandlerTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/ObjectsHandlerTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/PerformanceTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/ProguardMapTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/QueryTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/RiTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/RootedHandlerTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/SiteHandlerTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/SiteTest.java create mode 100644 tools/ahat/src/test/com/android/ahat/TestDump.java create mode 100644 tools/ahat/src/test/com/android/ahat/TestHandler.java create mode 100755 tools/analyze-init-failures.py create 
mode 100644 tools/art create mode 100644 tools/art_verifier/Android.bp create mode 100644 tools/art_verifier/art_verifier.cc create mode 100644 tools/asan.sh create mode 100644 tools/bisection_search/README.md create mode 100644 tools/bisection_search/__init__.py create mode 100755 tools/bisection_search/bisection_search.py create mode 100755 tools/bisection_search/bisection_test.py create mode 100755 tools/boot-image-profile-configure-device.sh create mode 100755 tools/boot-image-profile-extract-profile.sh create mode 100755 tools/boot-image-profile-generate.sh create mode 100755 tools/bootjars.sh create mode 100644 tools/build/var_cache.py create mode 100755 tools/build/var_cache.sh create mode 100644 tools/build/var_list create mode 100755 tools/build_linux_bionic.sh create mode 100755 tools/build_linux_bionic_tests.sh create mode 100755 tools/buildbot-build.sh create mode 100755 tools/buildbot-cleanup-device.sh create mode 100755 tools/buildbot-setup-device.sh create mode 100755 tools/buildbot-symbolize-crashes.sh create mode 100755 tools/buildbot-sync.sh create mode 100755 tools/buildbot-teardown-device.sh create mode 100644 tools/checker/README create mode 100755 tools/checker/checker.py create mode 100644 tools/checker/common/__init__.py create mode 100644 tools/checker/common/archs.py create mode 100644 tools/checker/common/immutables.py create mode 100644 tools/checker/common/logger.py create mode 100644 tools/checker/common/mixins.py create mode 100644 tools/checker/common/testing.py create mode 100644 tools/checker/file_format/__init__.py create mode 100644 tools/checker/file_format/c1visualizer/__init__.py create mode 100644 tools/checker/file_format/c1visualizer/parser.py create mode 100644 tools/checker/file_format/c1visualizer/struct.py create mode 100644 tools/checker/file_format/c1visualizer/test.py create mode 100644 tools/checker/file_format/checker/__init__.py create mode 100644 tools/checker/file_format/checker/parser.py create mode 100644 
tools/checker/file_format/checker/struct.py create mode 100644 tools/checker/file_format/checker/test.py create mode 100644 tools/checker/file_format/common.py create mode 100644 tools/checker/match/__init__.py create mode 100644 tools/checker/match/file.py create mode 100644 tools/checker/match/line.py create mode 100644 tools/checker/match/test.py create mode 100755 tools/checker/run_unit_tests.py create mode 100644 tools/class2greylist/Android.bp create mode 100644 tools/class2greylist/src/class2greylist.mf create mode 100644 tools/class2greylist/src/com/android/class2greylist/AlternativeNotFoundError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/AnnotatedClassContext.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/AnnotatedMemberContext.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/AnnotationConsumer.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/AnnotationHandler.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/ApiComponents.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/ApiResolver.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/ClassAlternativeNotFoundError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/ErrorReporter.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/HiddenapiFlagsWriter.java create mode 100644 
tools/class2greylist/src/com/android/class2greylist/JarReader.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/JavadocLinkSyntaxError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/MemberAlternativeNotFoundError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/MultipleAlternativesFoundError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/NoAlternativesSpecifiedError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/PackageAndClassName.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/RepeatedAnnotationHandler.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/RequiredAlternativeNotSpecifiedError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/SignatureSyntaxError.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/Status.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/StringCursor.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/StringCursorOutOfBoundsException.java create mode 100644 tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java create mode 100644 tools/class2greylist/test/Android.bp create mode 100644 tools/class2greylist/test/AndroidTest.xml create mode 100644 tools/class2greylist/test/src/com/android/class2greylist/AnnotationHandlerTestBase.java create mode 100644 tools/class2greylist/test/src/com/android/class2greylist/AnnotationPropertyWriterTest.java create mode 100644 tools/class2greylist/test/src/com/android/class2greylist/ApiComponentsTest.java create mode 100644 tools/class2greylist/test/src/com/android/class2greylist/ApiResolverTest.java create mode 100644 
tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java create mode 100644 tools/class2greylist/test/src/com/android/class2greylist/RepeatedAnnotationHandlerTest.java create mode 100644 tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java create mode 100644 tools/common/__init__.py create mode 100755 tools/common/common.py create mode 100755 tools/compile-classes.sh create mode 100755 tools/compile-jar.sh create mode 100644 tools/cpp-define-generator/Android.bp create mode 100644 tools/cpp-define-generator/art_field.def create mode 100644 tools/cpp-define-generator/art_method.def create mode 100644 tools/cpp-define-generator/asm_defines.cc create mode 100644 tools/cpp-define-generator/asm_defines.def create mode 100644 tools/cpp-define-generator/code_item.def create mode 100644 tools/cpp-define-generator/globals.def create mode 100644 tools/cpp-define-generator/lockword.def create mode 100755 tools/cpp-define-generator/make_header.py create mode 100755 tools/cpp-define-generator/make_header_test.py create mode 100644 tools/cpp-define-generator/mirror_array.def create mode 100644 tools/cpp-define-generator/mirror_class.def create mode 100644 tools/cpp-define-generator/mirror_dex_cache.def create mode 100644 tools/cpp-define-generator/mirror_object.def create mode 100644 tools/cpp-define-generator/mirror_string.def create mode 100644 tools/cpp-define-generator/osr.def create mode 100644 tools/cpp-define-generator/profiling_info.def create mode 100644 tools/cpp-define-generator/rosalloc.def create mode 100644 tools/cpp-define-generator/runtime.def create mode 100644 tools/cpp-define-generator/shadow_frame.def create mode 100644 tools/cpp-define-generator/thread.def create mode 100644 tools/dexanalyze/Android.bp create mode 100644 tools/dexanalyze/dexanalyze.cc create mode 100644 tools/dexanalyze/dexanalyze_bytecode.cc create mode 100644 tools/dexanalyze/dexanalyze_bytecode.h create 
mode 100644 tools/dexanalyze/dexanalyze_experiments.cc create mode 100644 tools/dexanalyze/dexanalyze_experiments.h create mode 100644 tools/dexanalyze/dexanalyze_strings.cc create mode 100644 tools/dexanalyze/dexanalyze_strings.h create mode 100644 tools/dexanalyze/dexanalyze_test.cc create mode 100644 tools/dexfuzz/Android.bp create mode 100644 tools/dexfuzz/Android.mk create mode 100644 tools/dexfuzz/README create mode 100755 tools/dexfuzz/dexfuzz create mode 100644 tools/dexfuzz/manifest.txt create mode 100644 tools/dexfuzz/src/dexfuzz/DexFuzz.java create mode 100644 tools/dexfuzz/src/dexfuzz/ExecutionResult.java create mode 100644 tools/dexfuzz/src/dexfuzz/Log.java create mode 100644 tools/dexfuzz/src/dexfuzz/MutationStats.java create mode 100644 tools/dexfuzz/src/dexfuzz/Options.java create mode 100644 tools/dexfuzz/src/dexfuzz/StreamConsumer.java create mode 100644 tools/dexfuzz/src/dexfuzz/Timer.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/Architecture.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/Arm64InterpreterExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/ArmInterpreterExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/Device.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/Executor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/X86InterpreterExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/X86_64InterpreterExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java create mode 100644 tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java create mode 100644 tools/dexfuzz/src/dexfuzz/fuzzers/FuzzerMultiple.java create mode 100644 
tools/dexfuzz/src/dexfuzz/fuzzers/FuzzerMultipleExecute.java create mode 100644 tools/dexfuzz/src/dexfuzz/fuzzers/FuzzerMultipleNoExecute.java create mode 100644 tools/dexfuzz/src/dexfuzz/fuzzers/FuzzerSingle.java create mode 100644 tools/dexfuzz/src/dexfuzz/fuzzers/FuzzerSingleExecute.java create mode 100644 tools/dexfuzz/src/dexfuzz/fuzzers/FuzzerSingleNoExecute.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/BaseListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/BisectionSearchListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/ConsoleLoggerListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/FinalStatusListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/LogFileListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/MultiplexerListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/UniqueProgramTrackerListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/listeners/UpdatingConsoleListener.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/CodeTranslator.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/IdCreator.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/MBranchInsn.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/MInsn.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/MInsnWithData.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/MSwitchInsn.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/MTryBlock.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/MutatableCode.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/Mutation.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/MutationSerializer.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/Program.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/ArithOpChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/BranchShifter.java create mode 100644 
tools/dexfuzz/src/dexfuzz/program/mutators/CmpBiasChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/CodeMutator.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/ConstantValueChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/ConversionRepeater.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/FieldFlagChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/IfBranchChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/InstructionDeleter.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/InstructionDuplicator.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/InstructionSwapper.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/InvokeChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/NewInstanceChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/NewMethodCaller.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/NonsenseStringPrinter.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/OppositeBranchChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/PoolIndexChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/RandomBranchChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/RandomInstructionGenerator.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/SwitchBranchShifter.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/TryBlockShifter.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/VRegChanger.java create mode 100644 tools/dexfuzz/src/dexfuzz/program/mutators/ValuePrinter.java create mode 100644 
tools/dexfuzz/src/dexfuzz/rawdex/AnnotationElement.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/AnnotationItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/AnnotationOffItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/AnnotationSetItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/AnnotationSetRefItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/AnnotationSetRefList.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/AnnotationsDirectoryItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/ClassDataItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/ClassDefItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/CodeItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/DebugInfoItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/DexRandomAccessFile.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedAnnotation.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedArray.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedArrayItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedCatchHandler.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedCatchHandlerList.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedField.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedMethod.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedTypeAddrPair.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/EncodedValue.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/FieldAnnotation.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/FieldIdItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/HeaderItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/Instruction.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/MapItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/MapList.java create mode 100644 
tools/dexfuzz/src/dexfuzz/rawdex/MethodAnnotation.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/MethodIdItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/Offset.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/OffsetTracker.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/Offsettable.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/Opcode.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/OpcodeInfo.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/ParameterAnnotation.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/ProtoIdItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/RawDexFile.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/RawDexObject.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/StringDataItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/StringIdItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/TryItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/TypeIdItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/TypeItem.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/TypeList.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/AbstractFormat.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/ContainsConst.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/ContainsPoolIndex.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/ContainsTarget.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/ContainsVRegs.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format00x.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format1.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format10t.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format10x.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format11n.java create mode 100644 
tools/dexfuzz/src/dexfuzz/rawdex/formats/Format11x.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format12x.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format2.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format20bc.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format20t.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format21c.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format21h.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format21s.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format21t.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format22b.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format22c.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format22cs.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format22s.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format22t.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format22x.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format23x.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format3.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format30t.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format31c.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format31i.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format31t.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format32x.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format35c.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format35mi.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format35ms.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format3rc.java create mode 100644 
tools/dexfuzz/src/dexfuzz/rawdex/formats/Format3rmi.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format3rms.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format5.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/Format51l.java create mode 100644 tools/dexfuzz/src/dexfuzz/rawdex/formats/RawInsnHelper.java create mode 100755 tools/dist_linux_bionic.sh create mode 100644 tools/dmtracedump/Android.bp create mode 100644 tools/dmtracedump/createtesttrace.cc create mode 100755 tools/dmtracedump/dmtracedump.pl create mode 100644 tools/dmtracedump/dumpdir.sh create mode 100644 tools/dmtracedump/profile.h create mode 100644 tools/dmtracedump/tracedump.cc create mode 100755 tools/dt_fds_forward.py create mode 100644 tools/external_oj_libjdwp_art_failures.txt create mode 100755 tools/extract-embedded-java create mode 100755 tools/findbuildbotwarnings.py create mode 100755 tools/generate_cmake_lists.py create mode 100755 tools/generate_operator_out.py create mode 100755 tools/golem/build-target.sh create mode 100755 tools/golem/env create mode 100644 tools/hiddenapi/Android.bp create mode 100644 tools/hiddenapi/README.md create mode 100755 tools/hiddenapi/find_api_violations.pl create mode 100644 tools/hiddenapi/hiddenapi.cc create mode 100644 tools/hiddenapi/hiddenapi_test.cc create mode 100755 tools/host_bcp.sh create mode 100755 tools/javac-helper.sh create mode 100644 tools/jfuzz/Android.bp create mode 100644 tools/jfuzz/README.md create mode 100644 tools/jfuzz/__init__.py create mode 100644 tools/jfuzz/jfuzz.cc create mode 100755 tools/jfuzz/run_dex_fuzz_test.py create mode 100755 tools/jfuzz/run_jfuzz_test.py create mode 100755 tools/jfuzz/run_jfuzz_test_nightly.py create mode 100644 tools/jvmti-agents/README.md create mode 100644 tools/jvmti-agents/breakpoint-logger/Android.bp create mode 100644 tools/jvmti-agents/breakpoint-logger/README.md create mode 100644 tools/jvmti-agents/breakpoint-logger/breakpoint_logger.cc 
create mode 100644 tools/jvmti-agents/chain-agents/Android.bp create mode 100644 tools/jvmti-agents/chain-agents/README.md create mode 100644 tools/jvmti-agents/chain-agents/chainagents.cc create mode 100644 tools/jvmti-agents/dump-jvmti-state/Android.bp create mode 100644 tools/jvmti-agents/dump-jvmti-state/README.md create mode 100644 tools/jvmti-agents/dump-jvmti-state/dump-jvmti.cc create mode 100644 tools/jvmti-agents/field-counts/Android.bp create mode 100644 tools/jvmti-agents/field-counts/README.md create mode 100755 tools/jvmti-agents/field-counts/count-fields.py create mode 100644 tools/jvmti-agents/field-counts/fieldcount.cc create mode 100644 tools/jvmti-agents/field-null-percent/Android.bp create mode 100644 tools/jvmti-agents/field-null-percent/README.md create mode 100755 tools/jvmti-agents/field-null-percent/check-null-fields.py create mode 100644 tools/jvmti-agents/field-null-percent/fieldnull.cc create mode 100644 tools/jvmti-agents/jit-load/Android.bp create mode 100644 tools/jvmti-agents/jit-load/README.md create mode 100644 tools/jvmti-agents/jit-load/jitload.cc create mode 100644 tools/jvmti-agents/list-extensions/Android.bp create mode 100644 tools/jvmti-agents/list-extensions/README.md create mode 100644 tools/jvmti-agents/list-extensions/list-extensions.cc create mode 100644 tools/jvmti-agents/simple-force-redefine/Android.bp create mode 100644 tools/jvmti-agents/simple-force-redefine/README.md create mode 100644 tools/jvmti-agents/simple-force-redefine/forceredefine.cc create mode 100644 tools/jvmti-agents/ti-alloc-sample/Android.bp create mode 100644 tools/jvmti-agents/ti-alloc-sample/README.md create mode 100755 tools/jvmti-agents/ti-alloc-sample/mkflame.py create mode 100644 tools/jvmti-agents/ti-alloc-sample/ti_alloc_sample.cc create mode 100644 tools/jvmti-agents/ti-fast/Android.bp create mode 100644 tools/jvmti-agents/ti-fast/README.md create mode 100644 tools/jvmti-agents/ti-fast/tifast.cc create mode 100644 
tools/jvmti-agents/titrace/Android.bp create mode 100644 tools/jvmti-agents/titrace/README.md create mode 100644 tools/jvmti-agents/titrace/instruction_decoder.cc create mode 100644 tools/jvmti-agents/titrace/instruction_decoder.h create mode 100644 tools/jvmti-agents/titrace/titrace.cc create mode 100644 tools/jvmti-agents/wrapagentproperties/Android.bp create mode 100644 tools/jvmti-agents/wrapagentproperties/README.md create mode 100644 tools/jvmti-agents/wrapagentproperties/wrapagentproperties.cc create mode 100644 tools/libcore_failures.txt create mode 100644 tools/libcore_fugu_failures.txt create mode 100644 tools/libcore_gcstress_debug_failures.txt create mode 100644 tools/libcore_gcstress_failures.txt create mode 100644 tools/libjavac/Android.bp create mode 100644 tools/libjavac/src/com/android/javac/Javac.java create mode 100644 tools/libjdwp-compat.props create mode 100644 tools/luci/config/cr-buildbucket.cfg create mode 100644 tools/luci/config/luci-logdog.cfg create mode 100644 tools/luci/config/luci-milo.cfg create mode 100644 tools/luci/config/luci-notify.cfg create mode 100644 tools/luci/config/luci-notify/email-templates/default.template create mode 100644 tools/luci/config/luci-scheduler.cfg create mode 100644 tools/luci/config/project.cfg create mode 100755 tools/parallel_run.py create mode 100644 tools/prebuilt_libjdwp_art_failures.txt create mode 100644 tools/public.libraries.buildbot.txt create mode 100755 tools/run-gtests.sh create mode 100755 tools/run-jdwp-tests.sh create mode 100755 tools/run-libcore-tests.sh create mode 100755 tools/run-libjdwp-tests.sh create mode 100755 tools/run-prebuilt-libjdwp-tests.sh create mode 100644 tools/runtime_memusage/README create mode 100755 tools/runtime_memusage/prune_sanitizer_output.py create mode 100755 tools/runtime_memusage/sanitizer_logcat_analysis.sh create mode 100755 tools/runtime_memusage/symbol_trace_info.py create mode 100644 tools/signal_dumper/Android.bp create mode 100644 
tools/signal_dumper/signal_dumper.cc create mode 100755 tools/stream-trace-converter.py create mode 100755 tools/symbolize.sh create mode 100755 tools/test_presubmit.py create mode 100644 tools/tracefast-plugin/Android.bp create mode 100644 tools/tracefast-plugin/tracefast.cc create mode 100644 tools/veridex/Android.bp create mode 100644 tools/veridex/Android.mk create mode 100644 tools/veridex/README.md create mode 100644 tools/veridex/api_list_filter.h create mode 100755 tools/veridex/appcompat.sh create mode 100644 tools/veridex/class_filter.h create mode 100644 tools/veridex/flow_analysis.cc create mode 100644 tools/veridex/flow_analysis.h create mode 100644 tools/veridex/hidden_api.cc create mode 100644 tools/veridex/hidden_api.h create mode 100644 tools/veridex/hidden_api_finder.cc create mode 100644 tools/veridex/hidden_api_finder.h create mode 100644 tools/veridex/precise_hidden_api_finder.cc create mode 100644 tools/veridex/precise_hidden_api_finder.h create mode 100644 tools/veridex/resolver.cc create mode 100644 tools/veridex/resolver.h create mode 100644 tools/veridex/veridex.cc create mode 100644 tools/veridex/veridex.h create mode 100755 tools/wrap-logcat.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..803c297 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +JIT_ART +**/__pycache__/** +**/.idea +**/*.iml +**/*.pyc +**/*.swn diff --git a/Android.bp b/Android.bp new file mode 100644 index 0000000..818fcfb --- /dev/null +++ b/Android.bp @@ -0,0 +1,5 @@ +// If you're looking for ART global stuff, please see build/Android.bp. + +package { + default_visibility: ["//art:__subpackages__"], +} diff --git a/Android.mk b/Android.mk new file mode 100644 index 0000000..d4de2e5 --- /dev/null +++ b/Android.mk @@ -0,0 +1,872 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +LOCAL_PATH := $(call my-dir) + +art_path := $(LOCAL_PATH) + +######################################################################## +# clean-oat rules +# + +include $(art_path)/build/Android.common_path.mk +include $(art_path)/build/Android.oat.mk + +.PHONY: clean-oat +clean-oat: clean-oat-host clean-oat-target + +.PHONY: clean-oat-host +clean-oat-host: + find $(OUT_DIR) '(' -name '*.oat' -o -name '*.odex' -o -name '*.art' -o -name '*.vdex' ')' -a -type f | xargs rm -f + rm -rf $(TMPDIR)/*/test-*/dalvik-cache/* + rm -rf $(TMPDIR)/android-data/dalvik-cache/* + +.PHONY: clean-oat-target +clean-oat-target: + $(ADB) root + $(ADB) wait-for-device remount + $(ADB) shell rm -rf $(ART_TARGET_NATIVETEST_DIR) + $(ADB) shell rm -rf $(ART_TARGET_TEST_DIR) + $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/* + $(ADB) shell rm -rf $(ART_DEXPREOPT_BOOT_JAR_DIR)/$(DEX2OAT_TARGET_ARCH) + $(ADB) shell rm -rf system/app/$(DEX2OAT_TARGET_ARCH) +ifdef TARGET_2ND_ARCH + $(ADB) shell rm -rf $(ART_DEXPREOPT_BOOT_JAR_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) + $(ADB) shell rm -rf system/app/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) +endif + $(ADB) shell rm -rf data/run-test/test-*/dalvik-cache/* + +######################################################################## +# cpplint rules to style check art source files + +include $(art_path)/build/Android.cpplint.mk + +######################################################################## +# product rules + +include $(art_path)/oatdump/Android.mk +include $(art_path)/tools/ahat/Android.mk 
+include $(art_path)/tools/dexfuzz/Android.mk +include $(art_path)/tools/veridex/Android.mk + +ART_HOST_DEPENDENCIES := \ + $(ART_HOST_EXECUTABLES) \ + $(ART_HOST_DEX_DEPENDENCIES) \ + $(ART_HOST_SHARED_LIBRARY_DEPENDENCIES) + +ifeq ($(ART_BUILD_HOST_DEBUG),true) +ART_HOST_DEPENDENCIES += $(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES) +endif + +ART_TARGET_DEPENDENCIES := \ + $(ART_TARGET_DEX_DEPENDENCIES) + +######################################################################## +# test rules + +# All the dependencies that must be built ahead of sync-ing them onto the target device. +TEST_ART_TARGET_SYNC_DEPS := $(ADB_EXECUTABLE) + +include $(art_path)/build/Android.common_test.mk +include $(art_path)/build/Android.gtest.mk +include $(art_path)/test/Android.run-test.mk + +TEST_ART_TARGET_SYNC_DEPS += $(ART_TEST_TARGET_GTEST_DEPENDENCIES) $(ART_TEST_TARGET_RUN_TEST_DEPENDENCIES) + +# Make sure /system is writable on the device. +TEST_ART_ADB_ROOT_AND_REMOUNT := \ + ($(ADB) root && \ + $(ADB) wait-for-device remount && \ + (($(ADB) shell touch /system/testfile && \ + ($(ADB) shell rm /system/testfile || true)) || \ + ($(ADB) disable-verity && \ + $(ADB) reboot && \ + $(ADB) wait-for-device root && \ + $(ADB) wait-for-device remount))) + +# "mm test-art" to build and run all tests on host and device +.PHONY: test-art +test-art: test-art-host test-art-target + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-gtest +test-art-gtest: test-art-host-gtest test-art-target-gtest + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-run-test +test-art-run-test: test-art-host-run-test test-art-target-run-test + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +######################################################################## +# host test rules + +VIXL_TEST_DEPENDENCY := +# We can only run the vixl tests on 64-bit hosts (vixl testing issue) when its a +# top-level build (to declare the vixl test rule). 
+ifneq ($(HOST_PREFER_32_BIT),true) +ifeq ($(ONE_SHOT_MAKEFILE),) +VIXL_TEST_DEPENDENCY := run-vixl-tests +endif +endif + +.PHONY: test-art-host-vixl +test-art-host-vixl: $(VIXL_TEST_DEPENDENCY) + +# "mm test-art-host" to build and run all host tests. +.PHONY: test-art-host +test-art-host: test-art-host-gtest test-art-host-run-test \ + test-art-host-vixl test-art-host-dexdump + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely with the default compiler. +.PHONY: test-art-host-default +test-art-host-default: test-art-host-run-test-default + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely with the optimizing compiler. +.PHONY: test-art-host-optimizing +test-art-host-optimizing: test-art-host-run-test-optimizing + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely on the interpreter. +.PHONY: test-art-host-interpreter +test-art-host-interpreter: test-art-host-run-test-interpreter + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely on the jit. 
+.PHONY: test-art-host-jit +test-art-host-jit: test-art-host-run-test-jit + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Primary host architecture variants: +.PHONY: test-art-host$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(ART_PHONY_TEST_HOST_SUFFIX) \ + test-art-host-run-test$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Secondary host architecture variants: +ifneq ($(HOST_PREFER_32_BIT),true) +.PHONY: test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(2ND_ART_PHONY_TEST_HOST_SUFFIX) \ + test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX) 
+test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) +endif + +# Dexdump/list regression test. +.PHONY: test-art-host-dexdump +test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump dexlist) + ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests + +######################################################################## +# target test rules + +# "mm test-art-target" to build and run all target tests. +.PHONY: test-art-target +test-art-target: test-art-target-gtest test-art-target-run-test + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely with the default compiler. +.PHONY: test-art-target-default +test-art-target-default: test-art-target-run-test-default + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely with the optimizing compiler. +.PHONY: test-art-target-optimizing +test-art-target-optimizing: test-art-target-run-test-optimizing + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely on the interpreter. +.PHONY: test-art-target-interpreter +test-art-target-interpreter: test-art-target-run-test-interpreter + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely on the jit. 
+.PHONY: test-art-target-jit +test-art-target-jit: test-art-target-run-test-jit + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Primary target architecture variants: +.PHONY: test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(ART_PHONY_TEST_TARGET_SUFFIX) \ + test-art-target-run-test$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Secondary target architecture variants: +ifdef 2ND_ART_PHONY_TEST_TARGET_SUFFIX +.PHONY: test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) \ + test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: 
test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) +endif + + +####################### +# ART APEX. + +include $(CLEAR_VARS) + +# The ART APEX comes in three flavors: +# - the release module (`com.android.art.release`), containing +# only "release" artifacts; +# - the debug module (`com.android.art.debug`), containing both +# "release" and "debug" artifacts, as well as additional tools; +# - the testing module (`com.android.art.testing`), containing +# both "release" and "debug" artifacts, as well as additional tools +# and ART gtests). +# +# The ART APEX module (`com.android.art`) is an "alias" for either the +# release or the debug module. By default, "user" build variants contain +# the release module, while "userdebug" and "eng" build variants contain +# the debug module. However, if `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` +# is defined, it overrides the previous logic: +# - if `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` is set to `false`, the +# build will include the release module (whatever the build +# variant); +# - if `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` is set to `true`, the +# build will include the debug module (whatever the build variant). 
+ +art_target_include_debug_build := $(PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD) +ifneq (false,$(art_target_include_debug_build)) + ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT))) + art_target_include_debug_build := true + endif +endif +ifeq (true,$(art_target_include_debug_build)) + # Module with both release and debug variants, as well as + # additional tools. + TARGET_ART_APEX := $(DEBUG_ART_APEX) + APEX_TEST_MODULE := art-check-debug-apex-gen-fakebin +else + # Release module (without debug variants nor tools). + TARGET_ART_APEX := $(RELEASE_ART_APEX) + APEX_TEST_MODULE := art-check-release-apex-gen-fakebin +endif + +LOCAL_MODULE := com.android.art +LOCAL_REQUIRED_MODULES := $(TARGET_ART_APEX) +LOCAL_REQUIRED_MODULES += art_apex_boot_integrity + +# Clear locally used variable. +art_target_include_debug_build := + +include $(BUILD_PHONY_PACKAGE) + +include $(CLEAR_VARS) +LOCAL_MODULE := com.android.art +LOCAL_IS_HOST_MODULE := true +ifneq ($(HOST_OS),darwin) + LOCAL_REQUIRED_MODULES += $(APEX_TEST_MODULE) +endif +include $(BUILD_PHONY_PACKAGE) + +# Create canonical name -> file name symlink in the symbol directory +# The symbol files for the debug or release variant are installed to +# $(TARGET_OUT_UNSTRIPPED)/$(TARGET_ART_APEX) directory. However, +# since they are available via /apex/com.android.art at runtime +# regardless of which variant is installed, create a symlink so that +# $(TARGET_OUT_UNSTRIPPED)/apex/com.android.art is linked to +# $(TARGET_OUT_UNSTRIPPED)/apex/$(TARGET_ART_APEX). +# Note that installation of the symlink is triggered by the apex_manifest.pb +# file which is the file that is guaranteed to be created regardless of the +# value of TARGET_FLATTEN_APEX. 
+ifeq ($(TARGET_FLATTEN_APEX),true) +art_apex_manifest_file := $(PRODUCT_OUT)/system/apex/$(TARGET_ART_APEX)/apex_manifest.pb +else +art_apex_manifest_file := $(PRODUCT_OUT)/apex/$(TARGET_ART_APEX)/apex_manifest.pb +endif + +art_apex_symlink_timestamp := $(call intermediates-dir-for,FAKE,com.android.art)/symlink.timestamp +$(art_apex_manifest_file): $(art_apex_symlink_timestamp) +$(art_apex_manifest_file): PRIVATE_LINK_NAME := $(TARGET_OUT_UNSTRIPPED)/apex/com.android.art +$(art_apex_symlink_timestamp): + $(hide) mkdir -p $(dir $(PRIVATE_LINK_NAME)) + $(hide) ln -sf $(TARGET_ART_APEX) $(PRIVATE_LINK_NAME) + $(hide) touch $@ + +art_apex_manifest_file := + +####################### +# Fake packages for ART + +# The art-runtime package depends on the core ART libraries and binaries. It exists so we can +# manipulate the set of things shipped, e.g., add debug versions and so on. + +include $(CLEAR_VARS) +LOCAL_MODULE := art-runtime + +# Base requirements. +LOCAL_REQUIRED_MODULES := \ + dalvikvm.com.android.art.release \ + dex2oat.com.android.art.release \ + dexoptanalyzer.com.android.art.release \ + libart.com.android.art.release \ + libart-compiler.com.android.art.release \ + libopenjdkjvm.com.android.art.release \ + libopenjdkjvmti.com.android.art.release \ + profman.com.android.art.release \ + libadbconnection.com.android.art.release \ + libperfetto_hprof.com.android.art.release \ + +# Potentially add in debug variants: +# +# * We will never add them if PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD = false. +# * We will always add them if PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD = true. +# * Otherwise, we will add them by default to userdebug and eng builds. 
+art_target_include_debug_build := $(PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD) +ifneq (false,$(art_target_include_debug_build)) +ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT))) + art_target_include_debug_build := true +endif +ifeq (true,$(art_target_include_debug_build)) +LOCAL_REQUIRED_MODULES += \ + dex2oatd.com.android.art.debug \ + dexoptanalyzerd.com.android.art.debug \ + libartd.com.android.art.debug \ + libartd-compiler.com.android.art.debug \ + libopenjdkd.com.android.art.debug \ + libopenjdkjvmd.com.android.art.debug \ + libopenjdkjvmtid.com.android.art.debug \ + profmand.com.android.art.debug \ + libadbconnectiond.com.android.art.debug \ + libperfetto_hprofd.com.android.art.debug \ + +endif +endif + +include $(BUILD_PHONY_PACKAGE) + +# The art-tools package depends on helpers and tools that are useful for developers. Similar +# dependencies exist for the APEX builds for these tools (see build/apex/Android.bp). + +include $(CLEAR_VARS) +LOCAL_MODULE := art-tools +LOCAL_IS_HOST_MODULE := true +LOCAL_REQUIRED_MODULES := \ + ahat \ + dexdump \ + hprof-conv \ + +# A subset of the tools are disabled when HOST_PREFER_32_BIT is defined as make reports that +# they are not supported on host (b/129323791). This is likely due to art_apex disabling host +# APEX builds when HOST_PREFER_32_BIT is set (b/120617876). +ifneq ($(HOST_PREFER_32_BIT),true) +LOCAL_REQUIRED_MODULES += \ + dexdiag \ + dexlist \ + oatdump \ + +endif + +include $(BUILD_PHONY_PACKAGE) + +#################################################################################################### +# Fake packages to ensure generation of libopenjdkd when one builds with mm/mmm/mmma. +# +# The library is required for starting a runtime in debug mode, but libartd does not depend on it +# (dependency cycle otherwise). +# +# Note: * As the package is phony to create a dependency the package name is irrelevant. 
+# * We make MULTILIB explicit to "both," just to state here that we want both libraries on +# 64-bit systems, even if it is the default. + +# ART on the host. +ifeq ($(ART_BUILD_HOST_DEBUG),true) +include $(CLEAR_VARS) +LOCAL_MODULE := art-libartd-libopenjdkd-host-dependency +LOCAL_MULTILIB := both +LOCAL_REQUIRED_MODULES := libopenjdkd +LOCAL_IS_HOST_MODULE := true +include $(BUILD_PHONY_PACKAGE) +endif + +# ART on the target. +ifeq ($(ART_BUILD_TARGET_DEBUG),true) +include $(CLEAR_VARS) +LOCAL_MODULE := art-libartd-libopenjdkd-target-dependency +LOCAL_MULTILIB := both +LOCAL_REQUIRED_MODULES := libopenjdkd +include $(BUILD_PHONY_PACKAGE) +endif + +######################################################################## +# "m build-art" for quick minimal build +.PHONY: build-art +build-art: build-art-host build-art-target + +.PHONY: build-art-host +build-art-host: $(HOST_OUT_EXECUTABLES)/art $(ART_HOST_DEPENDENCIES) $(HOST_CORE_IMG_OUTS) + +.PHONY: build-art-target +build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUTS) + +######################################################################## +# Workaround for not using symbolic links for linker and bionic libraries +# in a minimal setup (eg buildbot or golem). 
+######################################################################## + +PRIVATE_BIONIC_FILES := \ + bin/bootstrap/linker \ + bin/bootstrap/linker64 \ + lib/bootstrap/libc.so \ + lib/bootstrap/libm.so \ + lib/bootstrap/libdl.so \ + lib/bootstrap/libdl_android.so \ + lib64/bootstrap/libc.so \ + lib64/bootstrap/libm.so \ + lib64/bootstrap/libdl.so \ + lib64/bootstrap/libdl_android.so \ + +PRIVATE_ART_APEX_DEPENDENCY_FILES := \ + bin/dalvikvm32 \ + bin/dalvikvm64 \ + bin/dalvikvm \ + bin/dex2oat \ + bin/dex2oatd \ + bin/dexdump \ + +PRIVATE_ART_APEX_DEPENDENCY_LIBS := \ + lib/libadbconnectiond.so \ + lib/libadbconnection.so \ + lib/libandroidicu.so \ + lib/libandroidio.so \ + lib/libartbased.so \ + lib/libartbase.so \ + lib/libart-compiler.so \ + lib/libartd-compiler.so \ + lib/libartd-dexlayout.so \ + lib/libartd-disassembler.so \ + lib/libart-dexlayout.so \ + lib/libart-disassembler.so \ + lib/libartd.so \ + lib/libartpalette.so \ + lib/libart.so \ + lib/libbacktrace.so \ + lib/libbase.so \ + lib/libcrypto.so \ + lib/libdexfiled_external.so \ + lib/libdexfiled.so \ + lib/libdexfile_external.so \ + lib/libdexfile.so \ + lib/libdexfile_support.so \ + lib/libdt_fd_forward.so \ + lib/libdt_socket.so \ + lib/libexpat.so \ + lib/libicui18n.so \ + lib/libicu_jni.so \ + lib/libicuuc.so \ + lib/libjavacore.so \ + lib/libjdwp.so \ + lib/liblzma.so \ + lib/libmeminfo.so \ + lib/libnativebridge.so \ + lib/libnativehelper.so \ + lib/libnativeloader.so \ + lib/libnpt.so \ + lib/libopenjdkd.so \ + lib/libopenjdkjvmd.so \ + lib/libopenjdkjvm.so \ + lib/libopenjdkjvmtid.so \ + lib/libopenjdkjvmti.so \ + lib/libopenjdk.so \ + lib/libpac.so \ + lib/libprocinfo.so \ + lib/libprofiled.so \ + lib/libprofile.so \ + lib/libsigchain.so \ + lib/libunwindstack.so \ + lib/libvixld.so \ + lib/libvixl.so \ + lib/libziparchive.so \ + lib/libz.so \ + lib64/libadbconnectiond.so \ + lib64/libadbconnection.so \ + lib64/libandroidicu.so \ + lib64/libandroidio.so \ + lib64/libartbased.so \ + 
lib64/libartbase.so \ + lib64/libart-compiler.so \ + lib64/libartd-compiler.so \ + lib64/libartd-dexlayout.so \ + lib64/libartd-disassembler.so \ + lib64/libart-dexlayout.so \ + lib64/libart-disassembler.so \ + lib64/libartd.so \ + lib64/libartpalette.so \ + lib64/libart.so \ + lib64/libbacktrace.so \ + lib64/libbase.so \ + lib64/libcrypto.so \ + lib64/libdexfiled_external.so \ + lib64/libdexfiled.so \ + lib64/libdexfile_external.so \ + lib64/libdexfile.so \ + lib64/libdexfile_support.so \ + lib64/libdt_fd_forward.so \ + lib64/libdt_socket.so \ + lib64/libexpat.so \ + lib64/libicui18n.so \ + lib64/libicu_jni.so \ + lib64/libicuuc.so \ + lib64/libjavacore.so \ + lib64/libjdwp.so \ + lib64/liblzma.so \ + lib64/libmeminfo.so \ + lib64/libnativebridge.so \ + lib64/libnativehelper.so \ + lib64/libnativeloader.so \ + lib64/libnpt.so \ + lib64/libopenjdkd.so \ + lib64/libopenjdkjvmd.so \ + lib64/libopenjdkjvm.so \ + lib64/libopenjdkjvmtid.so \ + lib64/libopenjdkjvmti.so \ + lib64/libopenjdk.so \ + lib64/libpac.so \ + lib64/libprocinfo.so \ + lib64/libprofiled.so \ + lib64/libprofile.so \ + lib64/libsigchain.so \ + lib64/libunwindstack.so \ + lib64/libvixld.so \ + lib64/libvixl.so \ + lib64/libziparchive.so \ + lib64/libz.so \ + +PRIVATE_CONSCRYPT_APEX_DEPENDENCY_LIBS := \ + lib/libcrypto.so \ + lib/libjavacrypto.so \ + lib/libssl.so \ + lib64/libcrypto.so \ + lib64/libjavacrypto.so \ + lib64/libssl.so \ + +# Generate copies of Bionic bootstrap artifacts and ART APEX +# libraries in the `system` (TARGET_OUT) directory. This is dangerous +# as these files could inadvertently stay in this directory and be +# included in a system image. 
+# +# Copy some libraries into `$(TARGET_OUT)/lib(64)` (the +# `/system/lib(64)` directory to be sync'd to the target) for ART testing +# purposes: +# - Bionic bootstrap libraries, copied from +# `$(TARGET_OUT)/lib(64)/bootstrap` (the `/system/lib(64)/bootstrap` +# directory to be sync'd to the target); +# - Programs and libraries from the ART APEX; if the product +# to build uses flattened APEXes, these libraries are copied from +# `$(TARGET_OUT)/apex/com.android.art.debug` (the flattened +# (Debug) ART APEX directory to be sync'd to the target); +# otherwise, they are copied from +# `$(TARGET_OUT)/../apex/com.android.art.debug` (the local +# directory under the build tree containing the (Debug) ART APEX +# artifacts, which is not sync'd to the target). +# - Libraries from the Conscrypt APEX may be loaded during golem runs. +# +# This target is only used by Golem now. +# +# NB Android build does not use cp from: +# $ANDROID_BUILD_TOP/prebuilts/build-tools/path/{linux-x86,darwin-x86} +# which has a non-standard set of command-line flags. +# +# TODO(b/129332183): Remove this when Golem has full support for the +# ART APEX. 
+.PHONY: standalone-apex-files +standalone-apex-files: libc.bootstrap \ + libdl.bootstrap \ + libdl_android.bootstrap \ + libm.bootstrap \ + linker \ + $(DEBUG_ART_APEX) \ + $(CONSCRYPT_APEX) + for f in $(PRIVATE_BIONIC_FILES); do \ + tf=$(TARGET_OUT)/$$f; \ + if [ -f $$tf ]; then cp -f $$tf $$(echo $$tf | sed 's,bootstrap/,,'); fi; \ + done + if [ "x$(TARGET_FLATTEN_APEX)" = xtrue ]; then \ + apex_orig_dir=$(TARGET_OUT)/apex; \ + else \ + apex_orig_dir=""; \ + fi; \ + art_apex_orig_dir=$$apex_orig_dir/$(DEBUG_ART_APEX); \ + for f in $(PRIVATE_ART_APEX_DEPENDENCY_LIBS) $(PRIVATE_ART_APEX_DEPENDENCY_FILES); do \ + tf="$$art_apex_orig_dir/$$f"; \ + df="$(TARGET_OUT)/$$f"; \ + if [ -f $$tf ]; then \ + if [ -h $$df ]; then rm $$df; fi; \ + cp -fd $$tf $$df; \ + fi; \ + done; \ + conscrypt_apex_orig_dir=$$apex_orig_dir/$(CONSCRYPT_APEX); \ + for f in $(PRIVATE_CONSCRYPT_APEX_DEPENDENCY_LIBS); do \ + tf="$$conscrypt_apex_orig_dir/$$f"; \ + if [ -f $$tf ]; then cp -f $$tf $(TARGET_OUT)/$$f; fi; \ + done; \ + +######################################################################## +# Phony target for only building what go/lem requires for pushing ART on /data. + +.PHONY: build-art-target-golem +# Also include libartbenchmark, we always include it when running golem. +# libstdc++ is needed when building for ART_TARGET_LINUX. + +# Also include the bootstrap Bionic libraries (libc, libdl, libdl_android, +# libm). These are required as the "main" libc, libdl, libdl_android, and libm +# have moved to the ART APEX. This is a temporary change needed until Golem +# fully supports the ART APEX. +# +# TODO(b/129332183): Remove this when Golem has full support for the +# ART APEX. + +# Also include: +# - a copy of the ICU prebuilt .dat file in /system/etc/icu on target +# (see module `icu-data-art-test-i18n`); and +# so that it can be found even if the ART APEX is not available, by setting the +# environment variable `ART_TEST_ANDROID_ART_ROOT` to "/system" on device. 
This +# is a temporary change needed until Golem fully supports the ART APEX. +# +# TODO(b/129332183): Remove this when Golem has full support for the +# ART APEX. + +# Also include: +# - a copy of the time zone data prebuilt files in +# /system/etc/tzdata_module/etc/tz and /system/etc/tzdata_module/etc/icu +# on target, (see modules `tzdata-art-test-tzdata`, +# `tzlookup.xml-art-test-tzdata`, and `tz_version-art-test-tzdata`, and +# `icu_overlay-art-test-tzdata`) +# so that they can be found even if the Time Zone Data APEX is not available, +# by setting the environment variable `ART_TEST_ANDROID_TZDATA_ROOT` +# to "/system/etc/tzdata_module" on device. This is a temporary change needed +# until Golem fully supports the Time Zone Data APEX. +# +# TODO(b/129332183): Remove this when Golem has full support for the +# ART APEX (and TZ Data APEX). + +ART_TARGET_SHARED_LIBRARY_BENCHMARK := $(TARGET_OUT_SHARED_LIBRARIES)/libartbenchmark.so +build-art-target-golem: dex2oat dalvikvm linker libstdc++ \ + $(TARGET_OUT_EXECUTABLES)/art \ + $(TARGET_OUT)/etc/public.libraries.txt \ + $(ART_TARGET_DEX_DEPENDENCIES) \ + $(ART_DEBUG_TARGET_SHARED_LIBRARY_DEPENDENCIES) \ + $(ART_TARGET_SHARED_LIBRARY_BENCHMARK) \ + $(TARGET_CORE_IMG_OUT_BASE).art \ + $(TARGET_CORE_IMG_OUT_BASE)-interpreter.art \ + libartpalette-system \ + libc.bootstrap libdl.bootstrap libdl_android.bootstrap libm.bootstrap \ + icu-data-art-test-i18n \ + tzdata-art-test-tzdata tzlookup.xml-art-test-tzdata \ + tz_version-art-test-tzdata icu_overlay-art-test-tzdata \ + standalone-apex-files + # remove debug libraries from public.libraries.txt because golem builds + # won't have it. 
+ sed -i '/libartd.so/d' $(TARGET_OUT)/etc/public.libraries.txt + sed -i '/libdexfiled.so/d' $(TARGET_OUT)/etc/public.libraries.txt + sed -i '/libprofiled.so/d' $(TARGET_OUT)/etc/public.libraries.txt + sed -i '/libartbased.so/d' $(TARGET_OUT)/etc/public.libraries.txt + +######################################################################## +# Phony target for building what go/lem requires on host. +.PHONY: build-art-host-golem +# Also include libartbenchmark, we always include it when running golem. +ART_HOST_SHARED_LIBRARY_BENCHMARK := $(ART_HOST_OUT_SHARED_LIBRARIES)/libartbenchmark.so +build-art-host-golem: build-art-host \ + $(ART_HOST_SHARED_LIBRARY_BENCHMARK) + +######################################################################## +# Phony target for building what go/lem requires for syncing /system to target. +.PHONY: build-art-unbundled-golem +build-art-unbundled-golem: art-runtime linker oatdump $(ART_APEX_JARS) conscrypt crash_dump + +######################################################################## +# Rules for building all dependencies for tests. 
+ +.PHONY: build-art-host-tests +build-art-host-tests: build-art-host $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES) + +.PHONY: build-art-target-tests +build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_TARGET_RUN_TEST_DEPENDENCIES) $(ART_TEST_TARGET_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES) + +######################################################################## +# targets to switch back and forth from libdvm to libart + +.PHONY: use-art +use-art: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so + $(ADB) shell start + +.PHONY: use-artd +use-artd: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so + $(ADB) shell start + +.PHONY: use-dalvik +use-dalvik: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so + $(ADB) shell start + +.PHONY: use-art-full +use-art-full: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + $(ADB) shell setprop dalvik.vm.dex2oat-filter \"\" + $(ADB) shell setprop dalvik.vm.image-dex2oat-filter \"\" + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so + $(ADB) shell setprop dalvik.vm.usejit false + $(ADB) shell start + +.PHONY: use-artd-full +use-artd-full: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + $(ADB) shell setprop dalvik.vm.dex2oat-filter \"\" + $(ADB) shell setprop dalvik.vm.image-dex2oat-filter \"\" + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so + $(ADB) shell setprop dalvik.vm.usejit false + $(ADB) shell start + +.PHONY: use-art-jit +use-art-jit: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* 
+ $(ADB) shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime" + $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime" + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so + $(ADB) shell setprop dalvik.vm.usejit true + $(ADB) shell start + +.PHONY: use-art-interpret-only +use-art-interpret-only: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + $(ADB) shell setprop dalvik.vm.dex2oat-filter "interpret-only" + $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "interpret-only" + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so + $(ADB) shell setprop dalvik.vm.usejit false + $(ADB) shell start + +.PHONY: use-artd-interpret-only +use-artd-interpret-only: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + $(ADB) shell setprop dalvik.vm.dex2oat-filter "interpret-only" + $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "interpret-only" + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libartd.so + $(ADB) shell setprop dalvik.vm.usejit false + $(ADB) shell start + +.PHONY: use-art-verify-none +use-art-verify-none: + $(ADB) root + $(ADB) wait-for-device shell stop + $(ADB) shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + $(ADB) shell setprop dalvik.vm.dex2oat-filter "verify-none" + $(ADB) shell setprop dalvik.vm.image-dex2oat-filter "verify-none" + $(ADB) shell setprop persist.sys.dalvik.vm.lib.2 libart.so + $(ADB) shell setprop dalvik.vm.usejit false + $(ADB) shell start + +######################################################################## + +# Clear locally used variables. +TEST_ART_TARGET_SYNC_DEPS := + +# Helper target that depends on boot image creation. 
+# +# Can be used, for example, to dump initialization failures: +# m art-boot-image ART_BOOT_IMAGE_EXTRA_ARGS=--dump-init-failures=fails.txt +.PHONY: art-boot-image +art-boot-image: $(DEXPREOPT_IMAGE_boot_$(TARGET_ARCH)) + +.PHONY: art-job-images +art-job-images: \ + art-boot-image \ + $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \ + $(HOST_OUT_EXECUTABLES)/dex2oats \ + $(HOST_OUT_EXECUTABLES)/dex2oatds \ + $(HOST_OUT_EXECUTABLES)/profman diff --git a/CPPLINT.cfg b/CPPLINT.cfg new file mode 100644 index 0000000..8328842 --- /dev/null +++ b/CPPLINT.cfg @@ -0,0 +1,33 @@ +# +# Copyright (C) 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Don't search for additional CPPLINT.cfg in parent directories. +set noparent + +# Use 'ART_' as the cpp header guard prefix (e.g. #ifndef ART_PATH_TO_FILE_H_). +root=.. + +# Limit line length. +linelength=100 + +# Ignore the following categories of errors, as specified by the filter: +# (the filter settings are concatenated together) +filter=-build/c++11 +filter=-build/include +filter=-readability/function,-readability/streams,-readability/todo +filter=-runtime/printf,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn +# TODO: this should be re-enabled. 
+filter=-whitespace/line_length diff --git a/CleanSpec.mk b/CleanSpec.mk new file mode 100644 index 0000000..fe613b2 --- /dev/null +++ b/CleanSpec.mk @@ -0,0 +1,110 @@ +# Copyright (C) 2014 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# If you don't need to do a full clean build but would like to touch +# a file or delete some intermediate files, add a clean step to the end +# of the list. These steps will only be run once, if they haven't been +# run before. +# +# E.g.: +# $(call add-clean-step, touch -c external/sqlite/sqlite3.h) +# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates) +# +# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with +# files that are missing or have been moved. +# +# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory. +# Use $(OUT_DIR) to refer to the "out" directory. +# +# If you need to re-do something that's already mentioned, just copy +# the command and add it to the bottom of the list. E.g., if a change +# that you made last week required touching a file and a change you +# made today requires touching the same file, just copy the old +# touch step and add it to the end of the list. 
+# +# ************************************************ +# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST +# ************************************************ + +# For example: +#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates) +#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates) +#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f) +#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*) + +# Switching to jemalloc requires deleting these files. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libart_*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libartd_*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libart_*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libartd_*) + +# Old Android Runtime APEX package, before the introduction of "release" and "debug" packages. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex/com.android.runtime.apex) + +# Clean up ICU libraries moved to runtime apex +$(call add-clean-step, rm -f $(PRODUCT_OUT)/system/lib*/libandroidicu.so) +$(call add-clean-step, rm -f $(PRODUCT_OUT)/system/lib*/libpac.so) + +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/art_libdexfile_support_tests/dex_file_supp_test) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/) + +# Clean up duplicate compiles between static and shared compiles of libart and libartd +$(call add-clean-step, rm -rf $(OUT_DIR)/soong/.intermediates/art/runtime/libart/*shared*/obj) +$(call add-clean-step, rm -rf $(OUT_DIR)/soong/.intermediates/art/runtime/libartd/*shared*/obj) + +# Force regeneration of .apex files after removal of time zone data files from the runtime APEX +$(call add-clean-step, rm -rf 
$(PRODUCT_OUT)/system/apex/com.android.runtime.*) + +# Remove artifacts that used to be generated (as a workaround for +# improper Runtime APEX support) by tools/buildbot-build.sh via the +# `standalone-apex-files` Make rule. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/bin) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*) +# Remove artifacts that used to be generated (as a workaround for +# improper Runtime APEX support) by tools/buildbot-build.sh via the +# `icu-data-art-test` Make rule. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/icu) + +# Remove ART test target artifacts. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/) + +# Remove all APEX artifacts after the change to use the Testing +# Runtime APEX in lieu of the Debug Runtime APEX for ART testing. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex) + +# Remove the icu .dat file from /apex/com.android.runtime and the host equivalent. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex) +$(call add-clean-step, rm -rf $(HOST_OUT)/com.android.runtime/etc/icu/*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/icu) + +# Remove all APEX artifacts for the Runtime/ART APEX split. +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex) +$(call add-clean-step, rm -rf $(HOST_OUT)/apex) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/apex) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/symbols/apex) + +# Remove dex2oat artifacts for boot image extensions (workaround for broken dependencies). 
+$(call add-clean-step, find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f) +$(call add-clean-step, find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f) +$(call add-clean-step, find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f) + +# Remove empty dir for art APEX because it will be created on demand while mounting release|debug +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex/com.android.art) + +# ************************************************ +# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST +# ************************************************ diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2 new file mode 100644 index 0000000..e69de29 diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..faed58a --- /dev/null +++ b/NOTICE @@ -0,0 +1,190 @@ + + Copyright (c) 2005-2013, The Android Open Source Project + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + diff --git a/OWNERS b/OWNERS new file mode 100644 index 0000000..7297a14 --- /dev/null +++ b/OWNERS @@ -0,0 +1,3 @@ +ngeoffray@google.com +sehr@google.com +* diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg new file mode 100644 index 0000000..6ab01dc --- /dev/null +++ b/PREUPLOAD.cfg @@ -0,0 +1,11 @@ +[Hook Scripts] +check_generated_tests_up_to_date = tools/test_presubmit.py + +[Builtin Hooks] +cpplint = true +bpfmt = true +gofmt = true + +[Builtin Hooks Options] +# Cpplint prints nothing unless there were errors. +cpplint = --quiet ${PREUPLOAD_FILES} diff --git a/TEST_MAPPING b/TEST_MAPPING new file mode 100644 index 0000000..28dab29 --- /dev/null +++ b/TEST_MAPPING @@ -0,0 +1,10 @@ +{ + "presubmit": [ + { + "name": "CtsJdwpTestCases" + }, + { + "name": "BootImageProfileTest" + } + ] +} diff --git a/adbconnection/Android.bp b/adbconnection/Android.bp new file mode 100644 index 0000000..b03cd0d --- /dev/null +++ b/adbconnection/Android.bp @@ -0,0 +1,76 @@ +// +// Copyright (C) 2017 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Build variants {target,host} x {debug,ndebug} x {32,64} + +cc_defaults { + name: "adbconnection-defaults", + host_supported: true, + srcs: ["adbconnection.cc"], + defaults: ["art_defaults"], + + // Note that this tool needs to be built for both 32-bit and 64-bit since it requires + // to be same ISA as what it is attached to. 
+ compile_multilib: "both", + + shared_libs: [ + "libbase", + "libadbconnection_client", + ], + target: { + host: { + }, + darwin: { + enabled: false, + }, + }, + header_libs: [ + "libnativehelper_header_only", + "dt_fd_forward_export", + ], + required: [ + "libjdwp", + "libdt_fd_forward", + ], +} + +art_cc_library { + name: "libadbconnection", + defaults: ["adbconnection-defaults"], + shared_libs: [ + "libart", + "libartbase", + ], + apex_available: [ + "com.android.art.release", + "com.android.art.debug", + ], +} + +art_cc_library { + name: "libadbconnectiond", + defaults: [ + "art_debug_defaults", + "adbconnection-defaults", + ], + shared_libs: [ + "libartd", + "libartbased", + ], + apex_available: [ + "com.android.art.debug", + ], +} diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc new file mode 100644 index 0000000..7ac2edb --- /dev/null +++ b/adbconnection/adbconnection.cc @@ -0,0 +1,865 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "adbconnection.h" + +#include "adbconnection/client.h" +#include "android-base/endian.h" +#include "android-base/stringprintf.h" +#include "base/file_utils.h" +#include "base/logging.h" +#include "base/macros.h" +#include "base/mutex.h" +#include "base/socket_peer_is_trusted.h" +#include "debugger.h" +#include "jni/java_vm_ext.h" +#include "jni/jni_env_ext.h" +#include "mirror/throwable.h" +#include "nativehelper/scoped_local_ref.h" +#include "runtime-inl.h" +#include "runtime_callbacks.h" +#include "scoped_thread_state_change-inl.h" +#include "well_known_classes.h" + +#include "fd_transport.h" + +#include "poll.h" + +#include +#include +#include +#include +#include +#include + +namespace adbconnection { + +static constexpr size_t kJdwpHeaderLen = 11U; +/* DDM support */ +static constexpr uint8_t kJdwpDdmCmdSet = 199U; // 0xc7, or 'G'+128 +static constexpr uint8_t kJdwpDdmCmd = 1U; + +// Messages sent from the transport +using dt_fd_forward::kListenStartMessage; +using dt_fd_forward::kListenEndMessage; +using dt_fd_forward::kAcceptMessage; +using dt_fd_forward::kCloseMessage; + +// Messages sent to the transport +using dt_fd_forward::kPerformHandshakeMessage; +using dt_fd_forward::kSkipHandshakeMessage; + +using android::base::StringPrintf; + +static constexpr const char kJdwpHandshake[14] = { + 'J', 'D', 'W', 'P', '-', 'H', 'a', 'n', 'd', 's', 'h', 'a', 'k', 'e' +}; + +static constexpr int kEventfdLocked = 0; +static constexpr int kEventfdUnlocked = 1; + +static constexpr size_t kPacketHeaderLen = 11; +static constexpr off_t kPacketSizeOff = 0; +static constexpr off_t kPacketIdOff = 4; +static constexpr off_t kPacketCommandSetOff = 9; +static constexpr off_t kPacketCommandOff = 10; + +static constexpr uint8_t kDdmCommandSet = 199; +static constexpr uint8_t kDdmChunkCommand = 1; + +static std::optional gState; +static std::optional gPthread; + +static bool IsDebuggingPossible() { + return art::Dbg::IsJdwpAllowed(); 
+} + +// Begin running the debugger. +void AdbConnectionDebuggerController::StartDebugger() { + if (IsDebuggingPossible()) { + connection_->StartDebuggerThreads(); + } else { + LOG(ERROR) << "Not starting debugger since process cannot load the jdwp agent."; + } +} + +// The debugger should have already shut down since the runtime is ending. As far +// as the agent is concerned shutdown already happened when we went to kDeath +// state. We need to clean up our threads still though and this is a good time +// to do it since the runtime is still able to handle all the normal state +// transitions. +void AdbConnectionDebuggerController::StopDebugger() { + // Stop our threads. + gState->StopDebuggerThreads(); + // Wait for our threads to actually return and cleanup the pthread. + if (gPthread.has_value()) { + void* ret_unused; + if (TEMP_FAILURE_RETRY(pthread_join(gPthread.value(), &ret_unused)) != 0) { + PLOG(ERROR) << "Failed to join debugger threads!"; + } + gPthread.reset(); + } +} + +bool AdbConnectionDebuggerController::IsDebuggerConfigured() { + return IsDebuggingPossible() && !art::Runtime::Current()->GetJdwpOptions().empty(); +} + +void AdbConnectionDdmCallback::DdmPublishChunk(uint32_t type, + const art::ArrayRef& data) { + connection_->PublishDdmData(type, data); +} + +class ScopedEventFdLock { + public: + explicit ScopedEventFdLock(int fd) : fd_(fd), data_(0) { + TEMP_FAILURE_RETRY(read(fd_, &data_, sizeof(data_))); + } + + ~ScopedEventFdLock() { + TEMP_FAILURE_RETRY(write(fd_, &data_, sizeof(data_))); + } + + private: + int fd_; + uint64_t data_; +}; + +AdbConnectionState::AdbConnectionState(const std::string& agent_name) + : agent_name_(agent_name), + controller_(this), + ddm_callback_(this), + sleep_event_fd_(-1), + control_ctx_(nullptr, adbconnection_client_destroy), + local_agent_control_sock_(-1), + remote_agent_control_sock_(-1), + adb_connection_socket_(-1), + adb_write_event_fd_(-1), + shutting_down_(false), + agent_loaded_(false), + 
agent_listening_(false), + agent_has_socket_(false), + sent_agent_fds_(false), + performed_handshake_(false), + notified_ddm_active_(false), + next_ddm_id_(1), + started_debugger_threads_(false) { + // Setup the addr. + control_addr_.controlAddrUn.sun_family = AF_UNIX; + control_addr_len_ = sizeof(control_addr_.controlAddrUn.sun_family) + sizeof(kJdwpControlName) - 1; + memcpy(control_addr_.controlAddrUn.sun_path, kJdwpControlName, sizeof(kJdwpControlName) - 1); + + // Add the startup callback. + art::ScopedObjectAccess soa(art::Thread::Current()); + art::Runtime::Current()->GetRuntimeCallbacks()->AddDebuggerControlCallback(&controller_); +} + +AdbConnectionState::~AdbConnectionState() { + // Remove the startup callback. + art::Thread* self = art::Thread::Current(); + if (self != nullptr) { + art::ScopedObjectAccess soa(self); + art::Runtime::Current()->GetRuntimeCallbacks()->RemoveDebuggerControlCallback(&controller_); + } +} + +static jobject CreateAdbConnectionThread(art::Thread* thr) { + JNIEnv* env = thr->GetJniEnv(); + // Move to native state to talk with the jnienv api. 
+ art::ScopedThreadStateChange stsc(thr, art::kNative); + ScopedLocalRef thr_name(env, env->NewStringUTF(kAdbConnectionThreadName)); + ScopedLocalRef thr_group( + env, + env->GetStaticObjectField(art::WellKnownClasses::java_lang_ThreadGroup, + art::WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup)); + return env->NewObject(art::WellKnownClasses::java_lang_Thread, + art::WellKnownClasses::java_lang_Thread_init, + thr_group.get(), + thr_name.get(), + /*Priority=*/ 0, + /*Daemon=*/ true); +} + +struct CallbackData { + AdbConnectionState* this_; + jobject thr_; +}; + +static void* CallbackFunction(void* vdata) { + std::unique_ptr data(reinterpret_cast(vdata)); + art::Thread* self = art::Thread::Attach(kAdbConnectionThreadName, + true, + data->thr_); + CHECK(self != nullptr) << "threads_being_born_ should have ensured thread could be attached."; + // The name in Attach() is only for logging. Set the thread name. This is important so + // that the thread is no longer seen as starting up. + { + art::ScopedObjectAccess soa(self); + self->SetThreadName(kAdbConnectionThreadName); + } + + // Release the peer. + JNIEnv* env = self->GetJniEnv(); + env->DeleteGlobalRef(data->thr_); + data->thr_ = nullptr; + { + // The StartThreadBirth was called in the parent thread. We let the runtime know we are up + // before going into the provided code. + art::MutexLock mu(self, *art::Locks::runtime_shutdown_lock_); + art::Runtime::Current()->EndThreadBirth(); + } + data->this_->RunPollLoop(self); + int detach_result = art::Runtime::Current()->GetJavaVM()->DetachCurrentThread(); + CHECK_EQ(detach_result, 0); + + return nullptr; +} + +void AdbConnectionState::StartDebuggerThreads() { + // First do all the final setup we need. 
+ CHECK_EQ(adb_write_event_fd_.get(), -1); + CHECK_EQ(sleep_event_fd_.get(), -1); + CHECK_EQ(local_agent_control_sock_.get(), -1); + CHECK_EQ(remote_agent_control_sock_.get(), -1); + + sleep_event_fd_.reset(eventfd(kEventfdLocked, EFD_CLOEXEC)); + CHECK_NE(sleep_event_fd_.get(), -1) << "Unable to create wakeup eventfd."; + adb_write_event_fd_.reset(eventfd(kEventfdUnlocked, EFD_CLOEXEC)); + CHECK_NE(adb_write_event_fd_.get(), -1) << "Unable to create write-lock eventfd."; + + { + art::ScopedObjectAccess soa(art::Thread::Current()); + art::Runtime::Current()->GetRuntimeCallbacks()->AddDdmCallback(&ddm_callback_); + } + // Setup the socketpair we use to talk to the agent. + bool has_sockets; + do { + has_sockets = android::base::Socketpair(AF_UNIX, + SOCK_SEQPACKET | SOCK_CLOEXEC, + 0, + &local_agent_control_sock_, + &remote_agent_control_sock_); + } while (!has_sockets && errno == EINTR); + if (!has_sockets) { + PLOG(FATAL) << "Unable to create socketpair for agent control!"; + } + + // Next start the threads. + art::Thread* self = art::Thread::Current(); + art::ScopedObjectAccess soa(self); + { + art::Runtime* runtime = art::Runtime::Current(); + art::MutexLock mu(self, *art::Locks::runtime_shutdown_lock_); + if (runtime->IsShuttingDownLocked()) { + // The runtime is shutting down so we cannot create new threads. This shouldn't really happen. + LOG(ERROR) << "The runtime is shutting down when we are trying to start up the debugger!"; + return; + } + runtime->StartThreadBirth(); + } + ScopedLocalRef thr(soa.Env(), CreateAdbConnectionThread(soa.Self())); + // Note: Using pthreads instead of std::thread to not abort when the thread cannot be + // created (exception support required). 
+ std::unique_ptr data(new CallbackData { this, soa.Env()->NewGlobalRef(thr.get()) }); + started_debugger_threads_ = true; + gPthread.emplace(); + int pthread_create_result = pthread_create(&gPthread.value(), + nullptr, + &CallbackFunction, + data.get()); + if (pthread_create_result != 0) { + gPthread.reset(); + started_debugger_threads_ = false; + // If the create succeeded the other thread will call EndThreadBirth. + art::Runtime* runtime = art::Runtime::Current(); + soa.Env()->DeleteGlobalRef(data->thr_); + LOG(ERROR) << "Failed to create thread for adb-jdwp connection manager!"; + art::MutexLock mu(art::Thread::Current(), *art::Locks::runtime_shutdown_lock_); + runtime->EndThreadBirth(); + return; + } + data.release(); // NOLINT pthreads API. +} + +static bool FlagsSet(int16_t data, int16_t flags) { + return (data & flags) == flags; +} + +void AdbConnectionState::CloseFds() { + { + // Lock the write_event_fd so that concurrent PublishDdms will see that the connection is + // closed. + ScopedEventFdLock lk(adb_write_event_fd_); + // shutdown(adb_connection_socket_, SHUT_RDWR); + adb_connection_socket_.reset(); + } + + // If we didn't load anything we will need to do the handshake again. + performed_handshake_ = false; + + // If the agent isn't loaded we might need to tell ddms code the connection is closed. + if (!agent_loaded_ && notified_ddm_active_) { + NotifyDdms(/*active=*/false); + } +} + +void AdbConnectionState::NotifyDdms(bool active) { + art::ScopedObjectAccess soa(art::Thread::Current()); + DCHECK_NE(notified_ddm_active_, active); + notified_ddm_active_ = active; + if (active) { + art::Dbg::DdmConnected(); + } else { + art::Dbg::DdmDisconnected(); + } +} + +uint32_t AdbConnectionState::NextDdmId() { + // Just have a normal counter but always set the sign bit. 
+ return (next_ddm_id_++) | 0x80000000; +} + +void AdbConnectionState::PublishDdmData(uint32_t type, const art::ArrayRef& data) { + SendDdmPacket(NextDdmId(), DdmPacketType::kCmd, type, data); +} + +void AdbConnectionState::SendDdmPacket(uint32_t id, + DdmPacketType packet_type, + uint32_t type, + art::ArrayRef data) { + // Get the write_event early to fail fast. + ScopedEventFdLock lk(adb_write_event_fd_); + if (adb_connection_socket_ == -1) { + VLOG(jdwp) << "Not sending ddms data of type " + << StringPrintf("%c%c%c%c", + static_cast(type >> 24), + static_cast(type >> 16), + static_cast(type >> 8), + static_cast(type)) << " due to no connection!"; + // Adb is not connected. + return; + } + + // the adb_write_event_fd_ will ensure that the adb_connection_socket_ will not go away until + // after we have sent our data. + static constexpr uint32_t kDdmPacketHeaderSize = + kJdwpHeaderLen // jdwp command packet size + + sizeof(uint32_t) // Type + + sizeof(uint32_t); // length + alignas(sizeof(uint32_t)) std::array pkt; + uint8_t* pkt_data = pkt.data(); + + // Write the length first. + *reinterpret_cast(pkt_data) = htonl(kDdmPacketHeaderSize + data.size()); + pkt_data += sizeof(uint32_t); + + // Write the id next; + *reinterpret_cast(pkt_data) = htonl(id); + pkt_data += sizeof(uint32_t); + + // next the flags. (0 for cmd packet because DDMS). + *(pkt_data++) = static_cast(packet_type); + switch (packet_type) { + case DdmPacketType::kCmd: { + // Now the cmd-set + *(pkt_data++) = kJdwpDdmCmdSet; + // Now the command + *(pkt_data++) = kJdwpDdmCmd; + break; + } + case DdmPacketType::kReply: { + // This is the error code bytes which are all 0 + *(pkt_data++) = 0; + *(pkt_data++) = 0; + } + } + + // These are at unaligned addresses so we need to do them manually. + // now the type. 
+ uint32_t net_type = htonl(type); + memcpy(pkt_data, &net_type, sizeof(net_type)); + pkt_data += sizeof(uint32_t); + + // Now the data.size() + uint32_t net_len = htonl(data.size()); + memcpy(pkt_data, &net_len, sizeof(net_len)); + pkt_data += sizeof(uint32_t); + + static uint32_t constexpr kIovSize = 2; + struct iovec iovs[kIovSize] = { + { pkt.data(), pkt.size() }, + { const_cast(data.data()), data.size() }, + }; + // now pkt_header has the header. + // use writev to send the actual data. + ssize_t res = TEMP_FAILURE_RETRY(writev(adb_connection_socket_, iovs, kIovSize)); + if (static_cast(res) != (kDdmPacketHeaderSize + data.size())) { + PLOG(ERROR) << StringPrintf("Failed to send DDMS packet %c%c%c%c to debugger (%zd of %zu)", + static_cast(type >> 24), + static_cast(type >> 16), + static_cast(type >> 8), + static_cast(type), + res, data.size() + kDdmPacketHeaderSize); + } else { + VLOG(jdwp) << StringPrintf("sent DDMS packet %c%c%c%c to debugger %zu", + static_cast(type >> 24), + static_cast(type >> 16), + static_cast(type >> 8), + static_cast(type), + data.size() + kDdmPacketHeaderSize); + } +} + +void AdbConnectionState::SendAgentFds(bool require_handshake) { + DCHECK(!sent_agent_fds_); + const char* message = require_handshake ? kPerformHandshakeMessage : kSkipHandshakeMessage; + union { + cmsghdr cm; + char buffer[CMSG_SPACE(dt_fd_forward::FdSet::kDataLength)]; + } cm_un; + iovec iov; + iov.iov_base = const_cast(message); + iov.iov_len = strlen(message) + 1; + + msghdr msg; + msg.msg_name = nullptr; + msg.msg_namelen = 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_flags = 0; + msg.msg_control = cm_un.buffer; + msg.msg_controllen = sizeof(cm_un.buffer); + + cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_len = CMSG_LEN(dt_fd_forward::FdSet::kDataLength); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + + // Duplicate the fds before sending them. 
+ android::base::unique_fd read_fd(art::DupCloexec(adb_connection_socket_)); + CHECK_NE(read_fd.get(), -1) << "Failed to dup read_fd_: " << strerror(errno); + android::base::unique_fd write_fd(art::DupCloexec(adb_connection_socket_)); + CHECK_NE(write_fd.get(), -1) << "Failed to dup write_fd: " << strerror(errno); + android::base::unique_fd write_lock_fd(art::DupCloexec(adb_write_event_fd_)); + CHECK_NE(write_lock_fd.get(), -1) << "Failed to dup write_lock_fd: " << strerror(errno); + + dt_fd_forward::FdSet { + read_fd.get(), write_fd.get(), write_lock_fd.get() + }.WriteData(CMSG_DATA(cmsg)); + + int res = TEMP_FAILURE_RETRY(sendmsg(local_agent_control_sock_, &msg, MSG_EOR)); + if (res < 0) { + PLOG(ERROR) << "Failed to send agent adb connection fds."; + } else { + sent_agent_fds_ = true; + VLOG(jdwp) << "Fds have been sent to jdwp agent!"; + } +} + +android::base::unique_fd AdbConnectionState::ReadFdFromAdb() { + return android::base::unique_fd(adbconnection_client_receive_jdwp_fd(control_ctx_.get())); +} + +bool AdbConnectionState::SetupAdbConnection() { + int sleep_ms = 500; + const int sleep_max_ms = 2 * 1000; + + const AdbConnectionClientInfo infos[] = { + {.type = AdbConnectionClientInfoType::pid, .data.pid = static_cast(getpid())}, + {.type = AdbConnectionClientInfoType::debuggable, .data.debuggable = true}, + }; + const AdbConnectionClientInfo* info_ptrs[] = {&infos[0], &infos[1]}; + + while (!shutting_down_) { + // If adbd isn't running, because USB debugging was disabled or + // perhaps the system is restarting it for "adb root", the + // connect() will fail. We loop here forever waiting for it + // to come back. + // + // Waking up and polling every couple of seconds is generally a + // bad thing to do, but we only do this if the application is + // debuggable *and* adbd isn't running. 
Still, for the sake + // of battery life, we should consider timing out and giving + // up after a few minutes in case somebody ships an app with + // the debuggable flag set. + control_ctx_.reset(adbconnection_client_new(info_ptrs, std::size(infos))); + if (control_ctx_) { + return true; + } + + // We failed to connect. + usleep(sleep_ms * 1000); + + sleep_ms += (sleep_ms >> 1); + if (sleep_ms > sleep_max_ms) { + sleep_ms = sleep_max_ms; + } + } + + return false; +} + +void AdbConnectionState::RunPollLoop(art::Thread* self) { + CHECK_NE(agent_name_, ""); + CHECK_EQ(self->GetState(), art::kNative); + art::Locks::mutator_lock_->AssertNotHeld(self); + self->SetState(art::kWaitingInMainDebuggerLoop); + // shutting_down_ set by StopDebuggerThreads + while (!shutting_down_) { + // First, connect to adbd if we haven't already. + if (!control_ctx_ && !SetupAdbConnection()) { + LOG(ERROR) << "Failed to setup adb connection."; + return; + } + while (!shutting_down_ && control_ctx_) { + bool should_listen_on_connection = !agent_has_socket_ && !sent_agent_fds_; + struct pollfd pollfds[4] = { + { sleep_event_fd_, POLLIN, 0 }, + // -1 as an fd causes it to be ignored by poll + { (agent_loaded_ ? local_agent_control_sock_ : -1), POLLIN, 0 }, + // Check for the control_sock_ actually going away. Only do this if we don't have an active + // connection. + { (adb_connection_socket_ == -1 ? adbconnection_client_pollfd(control_ctx_.get()) : -1), + POLLIN | POLLRDHUP, 0 }, + // if we have not loaded the agent either the adb_connection_socket_ is -1 meaning we don't + // have a real connection yet or the socket through adb needs to be listened to for incoming + // data that the agent or this plugin can handle. + { should_listen_on_connection ? 
adb_connection_socket_ : -1, POLLIN | POLLRDHUP, 0 } + }; + int res = TEMP_FAILURE_RETRY(poll(pollfds, 4, -1)); + if (res < 0) { + PLOG(ERROR) << "Failed to poll!"; + return; + } + // We don't actually care about doing this we just use it to wake us up. + // const struct pollfd& sleep_event_poll = pollfds[0]; + const struct pollfd& agent_control_sock_poll = pollfds[1]; + const struct pollfd& control_sock_poll = pollfds[2]; + const struct pollfd& adb_socket_poll = pollfds[3]; + if (FlagsSet(agent_control_sock_poll.revents, POLLIN)) { + DCHECK(agent_loaded_); + char buf[257]; + res = TEMP_FAILURE_RETRY(recv(local_agent_control_sock_, buf, sizeof(buf) - 1, 0)); + if (res < 0) { + PLOG(ERROR) << "Failed to read message from agent control socket! Retrying"; + continue; + } else { + buf[res + 1] = '\0'; + VLOG(jdwp) << "Local agent control sock has data: " << static_cast(buf); + } + if (memcmp(kListenStartMessage, buf, sizeof(kListenStartMessage)) == 0) { + agent_listening_ = true; + if (adb_connection_socket_ != -1) { + SendAgentFds(/*require_handshake=*/ !performed_handshake_); + } + } else if (memcmp(kListenEndMessage, buf, sizeof(kListenEndMessage)) == 0) { + agent_listening_ = false; + } else if (memcmp(kCloseMessage, buf, sizeof(kCloseMessage)) == 0) { + CloseFds(); + agent_has_socket_ = false; + } else if (memcmp(kAcceptMessage, buf, sizeof(kAcceptMessage)) == 0) { + agent_has_socket_ = true; + sent_agent_fds_ = false; + // We will only ever do the handshake once so reset this. + performed_handshake_ = false; + } else { + LOG(ERROR) << "Unknown message received from debugger! '" << std::string(buf) << "'"; + } + } else if (FlagsSet(control_sock_poll.revents, POLLIN)) { + bool maybe_send_fds = false; + { + // Hold onto this lock so that concurrent ddm publishes don't try to use an illegal fd. 
+ ScopedEventFdLock sefdl(adb_write_event_fd_); + android::base::unique_fd new_fd(adbconnection_client_receive_jdwp_fd(control_ctx_.get())); + if (new_fd == -1) { + // Something went wrong. We need to retry getting the control socket. + control_ctx_.reset(); + break; + } else if (adb_connection_socket_ != -1) { + // We already have a connection. + VLOG(jdwp) << "Ignoring second debugger. Accept then drop!"; + if (new_fd >= 0) { + new_fd.reset(); + } + } else { + VLOG(jdwp) << "Adb connection established with fd " << new_fd; + adb_connection_socket_ = std::move(new_fd); + maybe_send_fds = true; + } + } + if (maybe_send_fds && agent_loaded_ && agent_listening_) { + VLOG(jdwp) << "Sending fds as soon as we received them."; + // The agent was already loaded so this must be after a disconnection. Therefore have the + // transport perform the handshake. + SendAgentFds(/*require_handshake=*/ true); + } + } else if (FlagsSet(control_sock_poll.revents, POLLRDHUP)) { + // The other end of the adb connection just dropped it. + // Reset the connection since we don't have an active socket through the adb server. + DCHECK(!agent_has_socket_) << "We shouldn't be doing anything if there is already a " + << "connection active"; + control_ctx_.reset(); + break; + } else if (FlagsSet(adb_socket_poll.revents, POLLIN)) { + DCHECK(!agent_has_socket_); + if (!agent_loaded_) { + HandleDataWithoutAgent(self); + } else if (agent_listening_ && !sent_agent_fds_) { + VLOG(jdwp) << "Sending agent fds again on data."; + // Agent was already loaded so it can deal with the handshake. 
+ SendAgentFds(/*require_handshake=*/ true); + } + } else if (FlagsSet(adb_socket_poll.revents, POLLRDHUP)) { + DCHECK(!agent_has_socket_); + CloseFds(); + } else { + VLOG(jdwp) << "Woke up poll without anything to do!"; + } + } + } +} + +static uint32_t ReadUint32AndAdvance(/*in-out*/uint8_t** in) { + uint32_t res; + memcpy(&res, *in, sizeof(uint32_t)); + *in = (*in) + sizeof(uint32_t); + return ntohl(res); +} + +void AdbConnectionState::HandleDataWithoutAgent(art::Thread* self) { + DCHECK(!agent_loaded_); + DCHECK(!agent_listening_); + // TODO Should we check in some other way if we are userdebug/eng? + CHECK(art::Dbg::IsJdwpAllowed()); + // We try to avoid loading the agent which is expensive. First lets just perform the handshake. + if (!performed_handshake_) { + PerformHandshake(); + return; + } + // Read the packet header to figure out if it is one we can handle. We only 'peek' into the stream + // to see if it's one we can handle. This doesn't change the state of the socket. + alignas(sizeof(uint32_t)) uint8_t packet_header[kPacketHeaderLen]; + ssize_t res = TEMP_FAILURE_RETRY(recv(adb_connection_socket_.get(), + packet_header, + sizeof(packet_header), + MSG_PEEK)); + // We want to be very careful not to change the socket state until we know we succeeded. This will + // let us fall-back to just loading the agent and letting it deal with everything. + if (res <= 0) { + // Close the socket. We either hit EOF or an error. + if (res < 0) { + PLOG(ERROR) << "Unable to peek into adb socket due to error. Closing socket."; + } + CloseFds(); + return; + } else if (res < static_cast(kPacketHeaderLen)) { + LOG(ERROR) << "Unable to peek into adb socket. Loading agent to handle this. 
Only read " << res; + AttachJdwpAgent(self); + return; + } + uint32_t full_len = ntohl(*reinterpret_cast(packet_header + kPacketSizeOff)); + uint32_t pkt_id = ntohl(*reinterpret_cast(packet_header + kPacketIdOff)); + uint8_t pkt_cmd_set = packet_header[kPacketCommandSetOff]; + uint8_t pkt_cmd = packet_header[kPacketCommandOff]; + if (pkt_cmd_set != kDdmCommandSet || + pkt_cmd != kDdmChunkCommand || + full_len < kPacketHeaderLen) { + VLOG(jdwp) << "Loading agent due to jdwp packet that cannot be handled by adbconnection."; + AttachJdwpAgent(self); + return; + } + uint32_t avail = -1; + res = TEMP_FAILURE_RETRY(ioctl(adb_connection_socket_.get(), FIONREAD, &avail)); + if (res < 0) { + PLOG(ERROR) << "Failed to determine amount of readable data in socket! Closing connection"; + CloseFds(); + return; + } else if (avail < full_len) { + LOG(WARNING) << "Unable to handle ddm command in adbconnection due to insufficent data. " + << "Expected " << full_len << " bytes but only " << avail << " are readable. " + << "Loading jdwp agent to deal with this."; + AttachJdwpAgent(self); + return; + } + // Actually read the data. + std::vector full_pkt; + full_pkt.resize(full_len); + res = TEMP_FAILURE_RETRY(recv(adb_connection_socket_.get(), full_pkt.data(), full_len, 0)); + if (res < 0) { + PLOG(ERROR) << "Failed to recv data from adb connection. Closing connection"; + CloseFds(); + return; + } + DCHECK_EQ(memcmp(full_pkt.data(), packet_header, sizeof(packet_header)), 0); + size_t data_size = full_len - kPacketHeaderLen; + if (data_size < (sizeof(uint32_t) * 2)) { + // This is an error (the data isn't long enough) but to match historical behavior we need to + // ignore it. 
+ return; + } + uint8_t* ddm_data = full_pkt.data() + kPacketHeaderLen; + uint32_t ddm_type = ReadUint32AndAdvance(&ddm_data); + uint32_t ddm_len = ReadUint32AndAdvance(&ddm_data); + if (ddm_len > data_size - (2 * sizeof(uint32_t))) { + // This is an error (the data isn't long enough) but to match historical behavior we need to + // ignore it. + return; + } + + if (!notified_ddm_active_) { + NotifyDdms(/*active=*/ true); + } + uint32_t reply_type; + std::vector reply; + if (!art::Dbg::DdmHandleChunk(self->GetJniEnv(), + ddm_type, + art::ArrayRef(reinterpret_cast(ddm_data), + ddm_len), + /*out*/&reply_type, + /*out*/&reply)) { + // To match historical behavior we don't send any response when there is no data to reply with. + return; + } + SendDdmPacket(pkt_id, + DdmPacketType::kReply, + reply_type, + art::ArrayRef(reply)); +} + +void AdbConnectionState::PerformHandshake() { + CHECK(!performed_handshake_); + // Check to make sure we are able to read the whole handshake. + uint32_t avail = -1; + int res = TEMP_FAILURE_RETRY(ioctl(adb_connection_socket_.get(), FIONREAD, &avail)); + if (res < 0 || avail < sizeof(kJdwpHandshake)) { + if (res < 0) { + PLOG(ERROR) << "Failed to determine amount of readable data for handshake!"; + } + LOG(WARNING) << "Closing connection to broken client."; + CloseFds(); + return; + } + // Perform the handshake. + char handshake_msg[sizeof(kJdwpHandshake)]; + res = TEMP_FAILURE_RETRY(recv(adb_connection_socket_.get(), + handshake_msg, + sizeof(handshake_msg), + MSG_DONTWAIT)); + if (res < static_cast(sizeof(kJdwpHandshake)) || + strncmp(handshake_msg, kJdwpHandshake, sizeof(kJdwpHandshake)) != 0) { + if (res < 0) { + PLOG(ERROR) << "Failed to read handshake!"; + } + LOG(WARNING) << "Handshake failed!"; + CloseFds(); + return; + } + // Send the handshake back. 
+ res = TEMP_FAILURE_RETRY(send(adb_connection_socket_.get(), + kJdwpHandshake, + sizeof(kJdwpHandshake), + 0)); + if (res < static_cast(sizeof(kJdwpHandshake))) { + PLOG(ERROR) << "Failed to send jdwp-handshake response."; + CloseFds(); + return; + } + performed_handshake_ = true; +} + +void AdbConnectionState::AttachJdwpAgent(art::Thread* self) { + art::Runtime* runtime = art::Runtime::Current(); + self->AssertNoPendingException(); + runtime->AttachAgent(/* env= */ nullptr, + MakeAgentArg(), + /* class_loader= */ nullptr); + if (self->IsExceptionPending()) { + LOG(ERROR) << "Failed to load agent " << agent_name_; + art::ScopedObjectAccess soa(self); + self->GetException()->Dump(); + self->ClearException(); + return; + } + agent_loaded_ = true; +} + +bool ContainsArgument(const std::string& opts, const char* arg) { + return opts.find(arg) != std::string::npos; +} + +bool ValidateJdwpOptions(const std::string& opts) { + bool res = true; + // The adbconnection plugin requires that the jdwp agent be configured as a 'server' because that + // is what adb expects and otherwise we will hit a deadlock as the poll loop thread stops waiting + // for the fd's to be passed down. + if (ContainsArgument(opts, "server=n")) { + res = false; + LOG(ERROR) << "Cannot start jdwp debugging with server=n from adbconnection."; + } + // We don't start the jdwp agent until threads are already running. It is far too late to suspend + // everything. + if (ContainsArgument(opts, "suspend=y")) { + res = false; + LOG(ERROR) << "Cannot use suspend=y with late-init jdwp."; + } + return res; +} + +std::string AdbConnectionState::MakeAgentArg() { + const std::string& opts = art::Runtime::Current()->GetJdwpOptions(); + DCHECK(ValidateJdwpOptions(opts)); + // TODO Get agent_name_ from something user settable? + return agent_name_ + "=" + opts + (opts.empty() ? "" : ",") + + "ddm_already_active=" + (notified_ddm_active_ ? "y" : "n") + "," + + // See the comment above for why we need to be server=y. 
Since the agent defaults to server=n + // we will add it if it wasn't already present for the convenience of the user. + (ContainsArgument(opts, "server=y") ? "" : "server=y,") + + // See the comment above for why we need to be suspend=n. Since the agent defaults to + // suspend=y we will add it if it wasn't already present. + (ContainsArgument(opts, "suspend=n") ? "" : "suspend=n,") + + "transport=dt_fd_forward,address=" + std::to_string(remote_agent_control_sock_); +} + +void AdbConnectionState::StopDebuggerThreads() { + // The regular agent system will take care of unloading the agent (if needed). + shutting_down_ = true; + // Wakeup the poll loop. + uint64_t data = 1; + if (sleep_event_fd_ != -1) { + TEMP_FAILURE_RETRY(write(sleep_event_fd_, &data, sizeof(data))); + } +} + +// The plugin initialization function. +extern "C" bool ArtPlugin_Initialize() { + DCHECK(art::Runtime::Current()->GetJdwpProvider() == art::JdwpProvider::kAdbConnection); + // TODO Provide some way for apps to set this maybe? + gState.emplace(kDefaultJdwpAgentName); + return ValidateJdwpOptions(art::Runtime::Current()->GetJdwpOptions()); +} + +extern "C" bool ArtPlugin_Deinitialize() { + // We don't actually have to do anything here. The debugger (if one was + // attached) was shutdown by the move to the kDeath runtime phase and the + // adbconnection threads were shutdown by StopDebugger. + return true; +} + +} // namespace adbconnection diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h new file mode 100644 index 0000000..32f42ba --- /dev/null +++ b/adbconnection/adbconnection.h @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_ADBCONNECTION_ADBCONNECTION_H_ +#define ART_ADBCONNECTION_ADBCONNECTION_H_ + +#include +#include +#include +#include + +#include "android-base/unique_fd.h" +#include "adbconnection/client.h" + +#include "base/mutex.h" +#include "base/array_ref.h" +#include "runtime_callbacks.h" + +#include +#include +#include + +namespace adbconnection { + +static constexpr char kJdwpControlName[] = "\0jdwp-control"; +static constexpr char kAdbConnectionThreadName[] = "ADB-JDWP Connection Control Thread"; + +// The default jdwp agent name. +static constexpr char kDefaultJdwpAgentName[] = "libjdwp.so"; + +class AdbConnectionState; + +struct AdbConnectionDebuggerController : public art::DebuggerControlCallback { + explicit AdbConnectionDebuggerController(AdbConnectionState* connection) + : connection_(connection) {} + + // Begin running the debugger. + void StartDebugger() override; + + // The debugger should begin shutting down since the runtime is ending. 
+ void StopDebugger() override; + + bool IsDebuggerConfigured() override; + + private: + AdbConnectionState* connection_; +}; + +enum class DdmPacketType : uint8_t { kReply = 0x80, kCmd = 0x00, }; + +struct AdbConnectionDdmCallback : public art::DdmCallback { + explicit AdbConnectionDdmCallback(AdbConnectionState* connection) : connection_(connection) {} + + void DdmPublishChunk(uint32_t type, + const art::ArrayRef& data) + REQUIRES_SHARED(art::Locks::mutator_lock_); + + private: + AdbConnectionState* connection_; +}; + +class AdbConnectionState { + public: + explicit AdbConnectionState(const std::string& name); + ~AdbConnectionState(); + + // Called on the listening thread to start dealing with new input. thr is used to attach the new + // thread to the runtime. + void RunPollLoop(art::Thread* self); + + // Sends ddms data over the socket, if there is one. This data is sent even if we haven't finished + // hand-shaking yet. + void PublishDdmData(uint32_t type, const art::ArrayRef& data); + + // Stops debugger threads during shutdown. + void StopDebuggerThreads(); + + // If StartDebuggerThreads was called successfully. + bool DebuggerThreadsStarted() { + return started_debugger_threads_; + } + + private: + uint32_t NextDdmId(); + + void StartDebuggerThreads(); + + // Tell adbd about the new runtime. 
+ bool SetupAdbConnection(); + + std::string MakeAgentArg(); + + android::base::unique_fd ReadFdFromAdb(); + + void SendAgentFds(bool require_handshake); + + void CloseFds(); + + void HandleDataWithoutAgent(art::Thread* self); + + void PerformHandshake(); + + void AttachJdwpAgent(art::Thread* self); + + void NotifyDdms(bool active); + + void SendDdmPacket(uint32_t id, + DdmPacketType type, + uint32_t ddm_type, + art::ArrayRef data); + + std::string agent_name_; + + AdbConnectionDebuggerController controller_; + AdbConnectionDdmCallback ddm_callback_; + + // Eventfd used to allow the StopDebuggerThreads function to wake up sleeping threads + android::base::unique_fd sleep_event_fd_; + + // Context which wraps the socket which we use to talk to adbd. + std::unique_ptr control_ctx_; + + // Socket that we use to talk to the agent (if it's loaded). + android::base::unique_fd local_agent_control_sock_; + + // The fd of the socket the agent uses to talk to us. We need to keep it around in order to clean + // it up when the runtime goes away. + android::base::unique_fd remote_agent_control_sock_; + + // The fd that is forwarded through adb to the client. This is guarded by the + // adb_write_event_fd_. + android::base::unique_fd adb_connection_socket_; + + // The fd we send to the agent to let us synchronize access to the shared adb_connection_socket_. + // This is also used as a general lock for the adb_connection_socket_ on any threads other than + // the poll thread. + android::base::unique_fd adb_write_event_fd_; + + std::atomic shutting_down_; + + // True if we have loaded the agent library. + std::atomic agent_loaded_; + + // True if the dt_fd_forward transport is listening for a new communication channel. + std::atomic agent_listening_; + + // True if the dt_fd_forward transport has the socket. If so we don't do anything to the agent or + // the adb connection socket until connection goes away. 
+ std::atomic agent_has_socket_; + + std::atomic sent_agent_fds_; + + bool performed_handshake_; + + bool notified_ddm_active_; + + std::atomic next_ddm_id_; + + bool started_debugger_threads_; + + socklen_t control_addr_len_; + union { + sockaddr_un controlAddrUn; + sockaddr controlAddrPlain; + } control_addr_; + + friend struct AdbConnectionDebuggerController; +}; + +} // namespace adbconnection + +#endif // ART_ADBCONNECTION_ADBCONNECTION_H_ diff --git a/benchmark/Android.bp b/benchmark/Android.bp new file mode 100644 index 0000000..3995ca2 --- /dev/null +++ b/benchmark/Android.bp @@ -0,0 +1,60 @@ +// +// Copyright (C) 2015 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +art_cc_library { + name: "libartbenchmark", + host_supported: true, + defaults: ["art_defaults"], + srcs: [ + "jni_loader.cc", + "jobject-benchmark/jobject_benchmark.cc", + "jni-perf/perf_jni.cc", + "micro-native/micro_native.cc", + "scoped-primitive-array/scoped_primitive_array.cc", + ], + shared_libs: [ + "libart", + "libbacktrace", + "libbase", + "libnativehelper", + ], + cflags: [ + "-Wno-frame-larger-than=", + ], +} + +art_cc_library { + name: "libartbenchmark-micronative-host", + host_supported: true, + device_supported: false, + defaults: [ + "art_debug_defaults", + "art_defaults", + ], + srcs: [ + "jni_loader.cc", + "micro-native/micro_native.cc", + ], + shared_libs: [ + ], + static_libs: [ + ], + header_libs: ["jni_headers"], + stl: "libc++_static", + cflags: [ + "-Wno-frame-larger-than=", + ], +} diff --git a/benchmark/const-class/info.txt b/benchmark/const-class/info.txt new file mode 100644 index 0000000..ed0b827 --- /dev/null +++ b/benchmark/const-class/info.txt @@ -0,0 +1 @@ +Benchmarks for repeating const-class instructions in a loop. diff --git a/benchmark/const-class/src/ConstClassBenchmark.java b/benchmark/const-class/src/ConstClassBenchmark.java new file mode 100644 index 0000000..d45b49f --- /dev/null +++ b/benchmark/const-class/src/ConstClassBenchmark.java @@ -0,0 +1,1071 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class ConstClassBenchmark { + // Define 1025 classes with consecutive type indexes in the dex file. + // The tests below rely on the knowledge that ART uses the low 10 bits + // of the type index as the hash into DexCache types array. + // Note: n == n + 1024 (mod 2^10), n + 1 != n + 1023 (mod 2^10). + public static class TestClass_0000 {} + public static class TestClass_0001 {} + public static class TestClass_0002 {} + public static class TestClass_0003 {} + public static class TestClass_0004 {} + public static class TestClass_0005 {} + public static class TestClass_0006 {} + public static class TestClass_0007 {} + public static class TestClass_0008 {} + public static class TestClass_0009 {} + public static class TestClass_0010 {} + public static class TestClass_0011 {} + public static class TestClass_0012 {} + public static class TestClass_0013 {} + public static class TestClass_0014 {} + public static class TestClass_0015 {} + public static class TestClass_0016 {} + public static class TestClass_0017 {} + public static class TestClass_0018 {} + public static class TestClass_0019 {} + public static class TestClass_0020 {} + public static class TestClass_0021 {} + public static class TestClass_0022 {} + public static class TestClass_0023 {} + public static class TestClass_0024 {} + public static class TestClass_0025 {} + public static class TestClass_0026 {} + public static class TestClass_0027 {} + public static class TestClass_0028 {} + public static class TestClass_0029 {} + public static class TestClass_0030 {} + public static class TestClass_0031 {} + public static class TestClass_0032 {} + public static class TestClass_0033 {} + public static class TestClass_0034 {} + public static class TestClass_0035 {} + public static class TestClass_0036 {} + public static class TestClass_0037 {} + public static class TestClass_0038 {} + public static class TestClass_0039 {} + public static class TestClass_0040 {} + public static class TestClass_0041 {} + 
public static class TestClass_0042 {} + public static class TestClass_0043 {} + public static class TestClass_0044 {} + public static class TestClass_0045 {} + public static class TestClass_0046 {} + public static class TestClass_0047 {} + public static class TestClass_0048 {} + public static class TestClass_0049 {} + public static class TestClass_0050 {} + public static class TestClass_0051 {} + public static class TestClass_0052 {} + public static class TestClass_0053 {} + public static class TestClass_0054 {} + public static class TestClass_0055 {} + public static class TestClass_0056 {} + public static class TestClass_0057 {} + public static class TestClass_0058 {} + public static class TestClass_0059 {} + public static class TestClass_0060 {} + public static class TestClass_0061 {} + public static class TestClass_0062 {} + public static class TestClass_0063 {} + public static class TestClass_0064 {} + public static class TestClass_0065 {} + public static class TestClass_0066 {} + public static class TestClass_0067 {} + public static class TestClass_0068 {} + public static class TestClass_0069 {} + public static class TestClass_0070 {} + public static class TestClass_0071 {} + public static class TestClass_0072 {} + public static class TestClass_0073 {} + public static class TestClass_0074 {} + public static class TestClass_0075 {} + public static class TestClass_0076 {} + public static class TestClass_0077 {} + public static class TestClass_0078 {} + public static class TestClass_0079 {} + public static class TestClass_0080 {} + public static class TestClass_0081 {} + public static class TestClass_0082 {} + public static class TestClass_0083 {} + public static class TestClass_0084 {} + public static class TestClass_0085 {} + public static class TestClass_0086 {} + public static class TestClass_0087 {} + public static class TestClass_0088 {} + public static class TestClass_0089 {} + public static class TestClass_0090 {} + public static class TestClass_0091 {} + 
public static class TestClass_0092 {} + public static class TestClass_0093 {} + public static class TestClass_0094 {} + public static class TestClass_0095 {} + public static class TestClass_0096 {} + public static class TestClass_0097 {} + public static class TestClass_0098 {} + public static class TestClass_0099 {} + public static class TestClass_0100 {} + public static class TestClass_0101 {} + public static class TestClass_0102 {} + public static class TestClass_0103 {} + public static class TestClass_0104 {} + public static class TestClass_0105 {} + public static class TestClass_0106 {} + public static class TestClass_0107 {} + public static class TestClass_0108 {} + public static class TestClass_0109 {} + public static class TestClass_0110 {} + public static class TestClass_0111 {} + public static class TestClass_0112 {} + public static class TestClass_0113 {} + public static class TestClass_0114 {} + public static class TestClass_0115 {} + public static class TestClass_0116 {} + public static class TestClass_0117 {} + public static class TestClass_0118 {} + public static class TestClass_0119 {} + public static class TestClass_0120 {} + public static class TestClass_0121 {} + public static class TestClass_0122 {} + public static class TestClass_0123 {} + public static class TestClass_0124 {} + public static class TestClass_0125 {} + public static class TestClass_0126 {} + public static class TestClass_0127 {} + public static class TestClass_0128 {} + public static class TestClass_0129 {} + public static class TestClass_0130 {} + public static class TestClass_0131 {} + public static class TestClass_0132 {} + public static class TestClass_0133 {} + public static class TestClass_0134 {} + public static class TestClass_0135 {} + public static class TestClass_0136 {} + public static class TestClass_0137 {} + public static class TestClass_0138 {} + public static class TestClass_0139 {} + public static class TestClass_0140 {} + public static class TestClass_0141 {} + 
public static class TestClass_0142 {} + public static class TestClass_0143 {} + public static class TestClass_0144 {} + public static class TestClass_0145 {} + public static class TestClass_0146 {} + public static class TestClass_0147 {} + public static class TestClass_0148 {} + public static class TestClass_0149 {} + public static class TestClass_0150 {} + public static class TestClass_0151 {} + public static class TestClass_0152 {} + public static class TestClass_0153 {} + public static class TestClass_0154 {} + public static class TestClass_0155 {} + public static class TestClass_0156 {} + public static class TestClass_0157 {} + public static class TestClass_0158 {} + public static class TestClass_0159 {} + public static class TestClass_0160 {} + public static class TestClass_0161 {} + public static class TestClass_0162 {} + public static class TestClass_0163 {} + public static class TestClass_0164 {} + public static class TestClass_0165 {} + public static class TestClass_0166 {} + public static class TestClass_0167 {} + public static class TestClass_0168 {} + public static class TestClass_0169 {} + public static class TestClass_0170 {} + public static class TestClass_0171 {} + public static class TestClass_0172 {} + public static class TestClass_0173 {} + public static class TestClass_0174 {} + public static class TestClass_0175 {} + public static class TestClass_0176 {} + public static class TestClass_0177 {} + public static class TestClass_0178 {} + public static class TestClass_0179 {} + public static class TestClass_0180 {} + public static class TestClass_0181 {} + public static class TestClass_0182 {} + public static class TestClass_0183 {} + public static class TestClass_0184 {} + public static class TestClass_0185 {} + public static class TestClass_0186 {} + public static class TestClass_0187 {} + public static class TestClass_0188 {} + public static class TestClass_0189 {} + public static class TestClass_0190 {} + public static class TestClass_0191 {} + 
public static class TestClass_0192 {} + public static class TestClass_0193 {} + public static class TestClass_0194 {} + public static class TestClass_0195 {} + public static class TestClass_0196 {} + public static class TestClass_0197 {} + public static class TestClass_0198 {} + public static class TestClass_0199 {} + public static class TestClass_0200 {} + public static class TestClass_0201 {} + public static class TestClass_0202 {} + public static class TestClass_0203 {} + public static class TestClass_0204 {} + public static class TestClass_0205 {} + public static class TestClass_0206 {} + public static class TestClass_0207 {} + public static class TestClass_0208 {} + public static class TestClass_0209 {} + public static class TestClass_0210 {} + public static class TestClass_0211 {} + public static class TestClass_0212 {} + public static class TestClass_0213 {} + public static class TestClass_0214 {} + public static class TestClass_0215 {} + public static class TestClass_0216 {} + public static class TestClass_0217 {} + public static class TestClass_0218 {} + public static class TestClass_0219 {} + public static class TestClass_0220 {} + public static class TestClass_0221 {} + public static class TestClass_0222 {} + public static class TestClass_0223 {} + public static class TestClass_0224 {} + public static class TestClass_0225 {} + public static class TestClass_0226 {} + public static class TestClass_0227 {} + public static class TestClass_0228 {} + public static class TestClass_0229 {} + public static class TestClass_0230 {} + public static class TestClass_0231 {} + public static class TestClass_0232 {} + public static class TestClass_0233 {} + public static class TestClass_0234 {} + public static class TestClass_0235 {} + public static class TestClass_0236 {} + public static class TestClass_0237 {} + public static class TestClass_0238 {} + public static class TestClass_0239 {} + public static class TestClass_0240 {} + public static class TestClass_0241 {} + 
public static class TestClass_0242 {} + public static class TestClass_0243 {} + public static class TestClass_0244 {} + public static class TestClass_0245 {} + public static class TestClass_0246 {} + public static class TestClass_0247 {} + public static class TestClass_0248 {} + public static class TestClass_0249 {} + public static class TestClass_0250 {} + public static class TestClass_0251 {} + public static class TestClass_0252 {} + public static class TestClass_0253 {} + public static class TestClass_0254 {} + public static class TestClass_0255 {} + public static class TestClass_0256 {} + public static class TestClass_0257 {} + public static class TestClass_0258 {} + public static class TestClass_0259 {} + public static class TestClass_0260 {} + public static class TestClass_0261 {} + public static class TestClass_0262 {} + public static class TestClass_0263 {} + public static class TestClass_0264 {} + public static class TestClass_0265 {} + public static class TestClass_0266 {} + public static class TestClass_0267 {} + public static class TestClass_0268 {} + public static class TestClass_0269 {} + public static class TestClass_0270 {} + public static class TestClass_0271 {} + public static class TestClass_0272 {} + public static class TestClass_0273 {} + public static class TestClass_0274 {} + public static class TestClass_0275 {} + public static class TestClass_0276 {} + public static class TestClass_0277 {} + public static class TestClass_0278 {} + public static class TestClass_0279 {} + public static class TestClass_0280 {} + public static class TestClass_0281 {} + public static class TestClass_0282 {} + public static class TestClass_0283 {} + public static class TestClass_0284 {} + public static class TestClass_0285 {} + public static class TestClass_0286 {} + public static class TestClass_0287 {} + public static class TestClass_0288 {} + public static class TestClass_0289 {} + public static class TestClass_0290 {} + public static class TestClass_0291 {} + 
public static class TestClass_0292 {} + public static class TestClass_0293 {} + public static class TestClass_0294 {} + public static class TestClass_0295 {} + public static class TestClass_0296 {} + public static class TestClass_0297 {} + public static class TestClass_0298 {} + public static class TestClass_0299 {} + public static class TestClass_0300 {} + public static class TestClass_0301 {} + public static class TestClass_0302 {} + public static class TestClass_0303 {} + public static class TestClass_0304 {} + public static class TestClass_0305 {} + public static class TestClass_0306 {} + public static class TestClass_0307 {} + public static class TestClass_0308 {} + public static class TestClass_0309 {} + public static class TestClass_0310 {} + public static class TestClass_0311 {} + public static class TestClass_0312 {} + public static class TestClass_0313 {} + public static class TestClass_0314 {} + public static class TestClass_0315 {} + public static class TestClass_0316 {} + public static class TestClass_0317 {} + public static class TestClass_0318 {} + public static class TestClass_0319 {} + public static class TestClass_0320 {} + public static class TestClass_0321 {} + public static class TestClass_0322 {} + public static class TestClass_0323 {} + public static class TestClass_0324 {} + public static class TestClass_0325 {} + public static class TestClass_0326 {} + public static class TestClass_0327 {} + public static class TestClass_0328 {} + public static class TestClass_0329 {} + public static class TestClass_0330 {} + public static class TestClass_0331 {} + public static class TestClass_0332 {} + public static class TestClass_0333 {} + public static class TestClass_0334 {} + public static class TestClass_0335 {} + public static class TestClass_0336 {} + public static class TestClass_0337 {} + public static class TestClass_0338 {} + public static class TestClass_0339 {} + public static class TestClass_0340 {} + public static class TestClass_0341 {} + 
public static class TestClass_0342 {} + public static class TestClass_0343 {} + public static class TestClass_0344 {} + public static class TestClass_0345 {} + public static class TestClass_0346 {} + public static class TestClass_0347 {} + public static class TestClass_0348 {} + public static class TestClass_0349 {} + public static class TestClass_0350 {} + public static class TestClass_0351 {} + public static class TestClass_0352 {} + public static class TestClass_0353 {} + public static class TestClass_0354 {} + public static class TestClass_0355 {} + public static class TestClass_0356 {} + public static class TestClass_0357 {} + public static class TestClass_0358 {} + public static class TestClass_0359 {} + public static class TestClass_0360 {} + public static class TestClass_0361 {} + public static class TestClass_0362 {} + public static class TestClass_0363 {} + public static class TestClass_0364 {} + public static class TestClass_0365 {} + public static class TestClass_0366 {} + public static class TestClass_0367 {} + public static class TestClass_0368 {} + public static class TestClass_0369 {} + public static class TestClass_0370 {} + public static class TestClass_0371 {} + public static class TestClass_0372 {} + public static class TestClass_0373 {} + public static class TestClass_0374 {} + public static class TestClass_0375 {} + public static class TestClass_0376 {} + public static class TestClass_0377 {} + public static class TestClass_0378 {} + public static class TestClass_0379 {} + public static class TestClass_0380 {} + public static class TestClass_0381 {} + public static class TestClass_0382 {} + public static class TestClass_0383 {} + public static class TestClass_0384 {} + public static class TestClass_0385 {} + public static class TestClass_0386 {} + public static class TestClass_0387 {} + public static class TestClass_0388 {} + public static class TestClass_0389 {} + public static class TestClass_0390 {} + public static class TestClass_0391 {} + 
public static class TestClass_0392 {} + public static class TestClass_0393 {} + public static class TestClass_0394 {} + public static class TestClass_0395 {} + public static class TestClass_0396 {} + public static class TestClass_0397 {} + public static class TestClass_0398 {} + public static class TestClass_0399 {} + public static class TestClass_0400 {} + public static class TestClass_0401 {} + public static class TestClass_0402 {} + public static class TestClass_0403 {} + public static class TestClass_0404 {} + public static class TestClass_0405 {} + public static class TestClass_0406 {} + public static class TestClass_0407 {} + public static class TestClass_0408 {} + public static class TestClass_0409 {} + public static class TestClass_0410 {} + public static class TestClass_0411 {} + public static class TestClass_0412 {} + public static class TestClass_0413 {} + public static class TestClass_0414 {} + public static class TestClass_0415 {} + public static class TestClass_0416 {} + public static class TestClass_0417 {} + public static class TestClass_0418 {} + public static class TestClass_0419 {} + public static class TestClass_0420 {} + public static class TestClass_0421 {} + public static class TestClass_0422 {} + public static class TestClass_0423 {} + public static class TestClass_0424 {} + public static class TestClass_0425 {} + public static class TestClass_0426 {} + public static class TestClass_0427 {} + public static class TestClass_0428 {} + public static class TestClass_0429 {} + public static class TestClass_0430 {} + public static class TestClass_0431 {} + public static class TestClass_0432 {} + public static class TestClass_0433 {} + public static class TestClass_0434 {} + public static class TestClass_0435 {} + public static class TestClass_0436 {} + public static class TestClass_0437 {} + public static class TestClass_0438 {} + public static class TestClass_0439 {} + public static class TestClass_0440 {} + public static class TestClass_0441 {} + 
public static class TestClass_0442 {} + public static class TestClass_0443 {} + public static class TestClass_0444 {} + public static class TestClass_0445 {} + public static class TestClass_0446 {} + public static class TestClass_0447 {} + public static class TestClass_0448 {} + public static class TestClass_0449 {} + public static class TestClass_0450 {} + public static class TestClass_0451 {} + public static class TestClass_0452 {} + public static class TestClass_0453 {} + public static class TestClass_0454 {} + public static class TestClass_0455 {} + public static class TestClass_0456 {} + public static class TestClass_0457 {} + public static class TestClass_0458 {} + public static class TestClass_0459 {} + public static class TestClass_0460 {} + public static class TestClass_0461 {} + public static class TestClass_0462 {} + public static class TestClass_0463 {} + public static class TestClass_0464 {} + public static class TestClass_0465 {} + public static class TestClass_0466 {} + public static class TestClass_0467 {} + public static class TestClass_0468 {} + public static class TestClass_0469 {} + public static class TestClass_0470 {} + public static class TestClass_0471 {} + public static class TestClass_0472 {} + public static class TestClass_0473 {} + public static class TestClass_0474 {} + public static class TestClass_0475 {} + public static class TestClass_0476 {} + public static class TestClass_0477 {} + public static class TestClass_0478 {} + public static class TestClass_0479 {} + public static class TestClass_0480 {} + public static class TestClass_0481 {} + public static class TestClass_0482 {} + public static class TestClass_0483 {} + public static class TestClass_0484 {} + public static class TestClass_0485 {} + public static class TestClass_0486 {} + public static class TestClass_0487 {} + public static class TestClass_0488 {} + public static class TestClass_0489 {} + public static class TestClass_0490 {} + public static class TestClass_0491 {} + 
public static class TestClass_0492 {} + public static class TestClass_0493 {} + public static class TestClass_0494 {} + public static class TestClass_0495 {} + public static class TestClass_0496 {} + public static class TestClass_0497 {} + public static class TestClass_0498 {} + public static class TestClass_0499 {} + public static class TestClass_0500 {} + public static class TestClass_0501 {} + public static class TestClass_0502 {} + public static class TestClass_0503 {} + public static class TestClass_0504 {} + public static class TestClass_0505 {} + public static class TestClass_0506 {} + public static class TestClass_0507 {} + public static class TestClass_0508 {} + public static class TestClass_0509 {} + public static class TestClass_0510 {} + public static class TestClass_0511 {} + public static class TestClass_0512 {} + public static class TestClass_0513 {} + public static class TestClass_0514 {} + public static class TestClass_0515 {} + public static class TestClass_0516 {} + public static class TestClass_0517 {} + public static class TestClass_0518 {} + public static class TestClass_0519 {} + public static class TestClass_0520 {} + public static class TestClass_0521 {} + public static class TestClass_0522 {} + public static class TestClass_0523 {} + public static class TestClass_0524 {} + public static class TestClass_0525 {} + public static class TestClass_0526 {} + public static class TestClass_0527 {} + public static class TestClass_0528 {} + public static class TestClass_0529 {} + public static class TestClass_0530 {} + public static class TestClass_0531 {} + public static class TestClass_0532 {} + public static class TestClass_0533 {} + public static class TestClass_0534 {} + public static class TestClass_0535 {} + public static class TestClass_0536 {} + public static class TestClass_0537 {} + public static class TestClass_0538 {} + public static class TestClass_0539 {} + public static class TestClass_0540 {} + public static class TestClass_0541 {} + 
public static class TestClass_0542 {} + public static class TestClass_0543 {} + public static class TestClass_0544 {} + public static class TestClass_0545 {} + public static class TestClass_0546 {} + public static class TestClass_0547 {} + public static class TestClass_0548 {} + public static class TestClass_0549 {} + public static class TestClass_0550 {} + public static class TestClass_0551 {} + public static class TestClass_0552 {} + public static class TestClass_0553 {} + public static class TestClass_0554 {} + public static class TestClass_0555 {} + public static class TestClass_0556 {} + public static class TestClass_0557 {} + public static class TestClass_0558 {} + public static class TestClass_0559 {} + public static class TestClass_0560 {} + public static class TestClass_0561 {} + public static class TestClass_0562 {} + public static class TestClass_0563 {} + public static class TestClass_0564 {} + public static class TestClass_0565 {} + public static class TestClass_0566 {} + public static class TestClass_0567 {} + public static class TestClass_0568 {} + public static class TestClass_0569 {} + public static class TestClass_0570 {} + public static class TestClass_0571 {} + public static class TestClass_0572 {} + public static class TestClass_0573 {} + public static class TestClass_0574 {} + public static class TestClass_0575 {} + public static class TestClass_0576 {} + public static class TestClass_0577 {} + public static class TestClass_0578 {} + public static class TestClass_0579 {} + public static class TestClass_0580 {} + public static class TestClass_0581 {} + public static class TestClass_0582 {} + public static class TestClass_0583 {} + public static class TestClass_0584 {} + public static class TestClass_0585 {} + public static class TestClass_0586 {} + public static class TestClass_0587 {} + public static class TestClass_0588 {} + public static class TestClass_0589 {} + public static class TestClass_0590 {} + public static class TestClass_0591 {} + 
public static class TestClass_0592 {} + public static class TestClass_0593 {} + public static class TestClass_0594 {} + public static class TestClass_0595 {} + public static class TestClass_0596 {} + public static class TestClass_0597 {} + public static class TestClass_0598 {} + public static class TestClass_0599 {} + public static class TestClass_0600 {} + public static class TestClass_0601 {} + public static class TestClass_0602 {} + public static class TestClass_0603 {} + public static class TestClass_0604 {} + public static class TestClass_0605 {} + public static class TestClass_0606 {} + public static class TestClass_0607 {} + public static class TestClass_0608 {} + public static class TestClass_0609 {} + public static class TestClass_0610 {} + public static class TestClass_0611 {} + public static class TestClass_0612 {} + public static class TestClass_0613 {} + public static class TestClass_0614 {} + public static class TestClass_0615 {} + public static class TestClass_0616 {} + public static class TestClass_0617 {} + public static class TestClass_0618 {} + public static class TestClass_0619 {} + public static class TestClass_0620 {} + public static class TestClass_0621 {} + public static class TestClass_0622 {} + public static class TestClass_0623 {} + public static class TestClass_0624 {} + public static class TestClass_0625 {} + public static class TestClass_0626 {} + public static class TestClass_0627 {} + public static class TestClass_0628 {} + public static class TestClass_0629 {} + public static class TestClass_0630 {} + public static class TestClass_0631 {} + public static class TestClass_0632 {} + public static class TestClass_0633 {} + public static class TestClass_0634 {} + public static class TestClass_0635 {} + public static class TestClass_0636 {} + public static class TestClass_0637 {} + public static class TestClass_0638 {} + public static class TestClass_0639 {} + public static class TestClass_0640 {} + public static class TestClass_0641 {} + 
public static class TestClass_0642 {} + public static class TestClass_0643 {} + public static class TestClass_0644 {} + public static class TestClass_0645 {} + public static class TestClass_0646 {} + public static class TestClass_0647 {} + public static class TestClass_0648 {} + public static class TestClass_0649 {} + public static class TestClass_0650 {} + public static class TestClass_0651 {} + public static class TestClass_0652 {} + public static class TestClass_0653 {} + public static class TestClass_0654 {} + public static class TestClass_0655 {} + public static class TestClass_0656 {} + public static class TestClass_0657 {} + public static class TestClass_0658 {} + public static class TestClass_0659 {} + public static class TestClass_0660 {} + public static class TestClass_0661 {} + public static class TestClass_0662 {} + public static class TestClass_0663 {} + public static class TestClass_0664 {} + public static class TestClass_0665 {} + public static class TestClass_0666 {} + public static class TestClass_0667 {} + public static class TestClass_0668 {} + public static class TestClass_0669 {} + public static class TestClass_0670 {} + public static class TestClass_0671 {} + public static class TestClass_0672 {} + public static class TestClass_0673 {} + public static class TestClass_0674 {} + public static class TestClass_0675 {} + public static class TestClass_0676 {} + public static class TestClass_0677 {} + public static class TestClass_0678 {} + public static class TestClass_0679 {} + public static class TestClass_0680 {} + public static class TestClass_0681 {} + public static class TestClass_0682 {} + public static class TestClass_0683 {} + public static class TestClass_0684 {} + public static class TestClass_0685 {} + public static class TestClass_0686 {} + public static class TestClass_0687 {} + public static class TestClass_0688 {} + public static class TestClass_0689 {} + public static class TestClass_0690 {} + public static class TestClass_0691 {} + 
public static class TestClass_0692 {} + public static class TestClass_0693 {} + public static class TestClass_0694 {} + public static class TestClass_0695 {} + public static class TestClass_0696 {} + public static class TestClass_0697 {} + public static class TestClass_0698 {} + public static class TestClass_0699 {} + public static class TestClass_0700 {} + public static class TestClass_0701 {} + public static class TestClass_0702 {} + public static class TestClass_0703 {} + public static class TestClass_0704 {} + public static class TestClass_0705 {} + public static class TestClass_0706 {} + public static class TestClass_0707 {} + public static class TestClass_0708 {} + public static class TestClass_0709 {} + public static class TestClass_0710 {} + public static class TestClass_0711 {} + public static class TestClass_0712 {} + public static class TestClass_0713 {} + public static class TestClass_0714 {} + public static class TestClass_0715 {} + public static class TestClass_0716 {} + public static class TestClass_0717 {} + public static class TestClass_0718 {} + public static class TestClass_0719 {} + public static class TestClass_0720 {} + public static class TestClass_0721 {} + public static class TestClass_0722 {} + public static class TestClass_0723 {} + public static class TestClass_0724 {} + public static class TestClass_0725 {} + public static class TestClass_0726 {} + public static class TestClass_0727 {} + public static class TestClass_0728 {} + public static class TestClass_0729 {} + public static class TestClass_0730 {} + public static class TestClass_0731 {} + public static class TestClass_0732 {} + public static class TestClass_0733 {} + public static class TestClass_0734 {} + public static class TestClass_0735 {} + public static class TestClass_0736 {} + public static class TestClass_0737 {} + public static class TestClass_0738 {} + public static class TestClass_0739 {} + public static class TestClass_0740 {} + public static class TestClass_0741 {} + 
public static class TestClass_0742 {} + public static class TestClass_0743 {} + public static class TestClass_0744 {} + public static class TestClass_0745 {} + public static class TestClass_0746 {} + public static class TestClass_0747 {} + public static class TestClass_0748 {} + public static class TestClass_0749 {} + public static class TestClass_0750 {} + public static class TestClass_0751 {} + public static class TestClass_0752 {} + public static class TestClass_0753 {} + public static class TestClass_0754 {} + public static class TestClass_0755 {} + public static class TestClass_0756 {} + public static class TestClass_0757 {} + public static class TestClass_0758 {} + public static class TestClass_0759 {} + public static class TestClass_0760 {} + public static class TestClass_0761 {} + public static class TestClass_0762 {} + public static class TestClass_0763 {} + public static class TestClass_0764 {} + public static class TestClass_0765 {} + public static class TestClass_0766 {} + public static class TestClass_0767 {} + public static class TestClass_0768 {} + public static class TestClass_0769 {} + public static class TestClass_0770 {} + public static class TestClass_0771 {} + public static class TestClass_0772 {} + public static class TestClass_0773 {} + public static class TestClass_0774 {} + public static class TestClass_0775 {} + public static class TestClass_0776 {} + public static class TestClass_0777 {} + public static class TestClass_0778 {} + public static class TestClass_0779 {} + public static class TestClass_0780 {} + public static class TestClass_0781 {} + public static class TestClass_0782 {} + public static class TestClass_0783 {} + public static class TestClass_0784 {} + public static class TestClass_0785 {} + public static class TestClass_0786 {} + public static class TestClass_0787 {} + public static class TestClass_0788 {} + public static class TestClass_0789 {} + public static class TestClass_0790 {} + public static class TestClass_0791 {} + 
public static class TestClass_0792 {} + public static class TestClass_0793 {} + public static class TestClass_0794 {} + public static class TestClass_0795 {} + public static class TestClass_0796 {} + public static class TestClass_0797 {} + public static class TestClass_0798 {} + public static class TestClass_0799 {} + public static class TestClass_0800 {} + public static class TestClass_0801 {} + public static class TestClass_0802 {} + public static class TestClass_0803 {} + public static class TestClass_0804 {} + public static class TestClass_0805 {} + public static class TestClass_0806 {} + public static class TestClass_0807 {} + public static class TestClass_0808 {} + public static class TestClass_0809 {} + public static class TestClass_0810 {} + public static class TestClass_0811 {} + public static class TestClass_0812 {} + public static class TestClass_0813 {} + public static class TestClass_0814 {} + public static class TestClass_0815 {} + public static class TestClass_0816 {} + public static class TestClass_0817 {} + public static class TestClass_0818 {} + public static class TestClass_0819 {} + public static class TestClass_0820 {} + public static class TestClass_0821 {} + public static class TestClass_0822 {} + public static class TestClass_0823 {} + public static class TestClass_0824 {} + public static class TestClass_0825 {} + public static class TestClass_0826 {} + public static class TestClass_0827 {} + public static class TestClass_0828 {} + public static class TestClass_0829 {} + public static class TestClass_0830 {} + public static class TestClass_0831 {} + public static class TestClass_0832 {} + public static class TestClass_0833 {} + public static class TestClass_0834 {} + public static class TestClass_0835 {} + public static class TestClass_0836 {} + public static class TestClass_0837 {} + public static class TestClass_0838 {} + public static class TestClass_0839 {} + public static class TestClass_0840 {} + public static class TestClass_0841 {} + 
public static class TestClass_0842 {} + public static class TestClass_0843 {} + public static class TestClass_0844 {} + public static class TestClass_0845 {} + public static class TestClass_0846 {} + public static class TestClass_0847 {} + public static class TestClass_0848 {} + public static class TestClass_0849 {} + public static class TestClass_0850 {} + public static class TestClass_0851 {} + public static class TestClass_0852 {} + public static class TestClass_0853 {} + public static class TestClass_0854 {} + public static class TestClass_0855 {} + public static class TestClass_0856 {} + public static class TestClass_0857 {} + public static class TestClass_0858 {} + public static class TestClass_0859 {} + public static class TestClass_0860 {} + public static class TestClass_0861 {} + public static class TestClass_0862 {} + public static class TestClass_0863 {} + public static class TestClass_0864 {} + public static class TestClass_0865 {} + public static class TestClass_0866 {} + public static class TestClass_0867 {} + public static class TestClass_0868 {} + public static class TestClass_0869 {} + public static class TestClass_0870 {} + public static class TestClass_0871 {} + public static class TestClass_0872 {} + public static class TestClass_0873 {} + public static class TestClass_0874 {} + public static class TestClass_0875 {} + public static class TestClass_0876 {} + public static class TestClass_0877 {} + public static class TestClass_0878 {} + public static class TestClass_0879 {} + public static class TestClass_0880 {} + public static class TestClass_0881 {} + public static class TestClass_0882 {} + public static class TestClass_0883 {} + public static class TestClass_0884 {} + public static class TestClass_0885 {} + public static class TestClass_0886 {} + public static class TestClass_0887 {} + public static class TestClass_0888 {} + public static class TestClass_0889 {} + public static class TestClass_0890 {} + public static class TestClass_0891 {} + 
public static class TestClass_0892 {} + public static class TestClass_0893 {} + public static class TestClass_0894 {} + public static class TestClass_0895 {} + public static class TestClass_0896 {} + public static class TestClass_0897 {} + public static class TestClass_0898 {} + public static class TestClass_0899 {} + public static class TestClass_0900 {} + public static class TestClass_0901 {} + public static class TestClass_0902 {} + public static class TestClass_0903 {} + public static class TestClass_0904 {} + public static class TestClass_0905 {} + public static class TestClass_0906 {} + public static class TestClass_0907 {} + public static class TestClass_0908 {} + public static class TestClass_0909 {} + public static class TestClass_0910 {} + public static class TestClass_0911 {} + public static class TestClass_0912 {} + public static class TestClass_0913 {} + public static class TestClass_0914 {} + public static class TestClass_0915 {} + public static class TestClass_0916 {} + public static class TestClass_0917 {} + public static class TestClass_0918 {} + public static class TestClass_0919 {} + public static class TestClass_0920 {} + public static class TestClass_0921 {} + public static class TestClass_0922 {} + public static class TestClass_0923 {} + public static class TestClass_0924 {} + public static class TestClass_0925 {} + public static class TestClass_0926 {} + public static class TestClass_0927 {} + public static class TestClass_0928 {} + public static class TestClass_0929 {} + public static class TestClass_0930 {} + public static class TestClass_0931 {} + public static class TestClass_0932 {} + public static class TestClass_0933 {} + public static class TestClass_0934 {} + public static class TestClass_0935 {} + public static class TestClass_0936 {} + public static class TestClass_0937 {} + public static class TestClass_0938 {} + public static class TestClass_0939 {} + public static class TestClass_0940 {} + public static class TestClass_0941 {} + 
public static class TestClass_0942 {} + public static class TestClass_0943 {} + public static class TestClass_0944 {} + public static class TestClass_0945 {} + public static class TestClass_0946 {} + public static class TestClass_0947 {} + public static class TestClass_0948 {} + public static class TestClass_0949 {} + public static class TestClass_0950 {} + public static class TestClass_0951 {} + public static class TestClass_0952 {} + public static class TestClass_0953 {} + public static class TestClass_0954 {} + public static class TestClass_0955 {} + public static class TestClass_0956 {} + public static class TestClass_0957 {} + public static class TestClass_0958 {} + public static class TestClass_0959 {} + public static class TestClass_0960 {} + public static class TestClass_0961 {} + public static class TestClass_0962 {} + public static class TestClass_0963 {} + public static class TestClass_0964 {} + public static class TestClass_0965 {} + public static class TestClass_0966 {} + public static class TestClass_0967 {} + public static class TestClass_0968 {} + public static class TestClass_0969 {} + public static class TestClass_0970 {} + public static class TestClass_0971 {} + public static class TestClass_0972 {} + public static class TestClass_0973 {} + public static class TestClass_0974 {} + public static class TestClass_0975 {} + public static class TestClass_0976 {} + public static class TestClass_0977 {} + public static class TestClass_0978 {} + public static class TestClass_0979 {} + public static class TestClass_0980 {} + public static class TestClass_0981 {} + public static class TestClass_0982 {} + public static class TestClass_0983 {} + public static class TestClass_0984 {} + public static class TestClass_0985 {} + public static class TestClass_0986 {} + public static class TestClass_0987 {} + public static class TestClass_0988 {} + public static class TestClass_0989 {} + public static class TestClass_0990 {} + public static class TestClass_0991 {} + 
public static class TestClass_0992 {} + public static class TestClass_0993 {} + public static class TestClass_0994 {} + public static class TestClass_0995 {} + public static class TestClass_0996 {} + public static class TestClass_0997 {} + public static class TestClass_0998 {} + public static class TestClass_0999 {} + public static class TestClass_1000 {} + public static class TestClass_1001 {} + public static class TestClass_1002 {} + public static class TestClass_1003 {} + public static class TestClass_1004 {} + public static class TestClass_1005 {} + public static class TestClass_1006 {} + public static class TestClass_1007 {} + public static class TestClass_1008 {} + public static class TestClass_1009 {} + public static class TestClass_1010 {} + public static class TestClass_1011 {} + public static class TestClass_1012 {} + public static class TestClass_1013 {} + public static class TestClass_1014 {} + public static class TestClass_1015 {} + public static class TestClass_1016 {} + public static class TestClass_1017 {} + public static class TestClass_1018 {} + public static class TestClass_1019 {} + public static class TestClass_1020 {} + public static class TestClass_1021 {} + public static class TestClass_1022 {} + public static class TestClass_1023 {} + public static class TestClass_1024 {} + + public void timeConstClassWithConflict(int count) { + Class class0001 = TestClass_0001.class; + for (int i = 0; i < count; ++i) { + $noinline$foo(class0001); // Prevent LICM on the TestClass_xxxx.class below. + $noinline$foo(TestClass_0000.class); + $noinline$foo(TestClass_1024.class); + } + } + + public void timeConstClassWithoutConflict(int count) { + Class class0000 = TestClass_0000.class; + for (int i = 0; i < count; ++i) { + $noinline$foo(class0000); // Prevent LICM on the TestClass_xxxx.class below. 
+ $noinline$foo(TestClass_0001.class); + $noinline$foo(TestClass_1023.class); + } + } + + static void $noinline$foo(Class s) { + if (doThrow) { throw new Error(); } + } + + public static boolean doThrow = false; +} diff --git a/benchmark/const-string/info.txt b/benchmark/const-string/info.txt new file mode 100644 index 0000000..78f39d2 --- /dev/null +++ b/benchmark/const-string/info.txt @@ -0,0 +1 @@ +Benchmarks for repeating const-string instructions in a loop. diff --git a/benchmark/const-string/src/ConstStringBenchmark.java b/benchmark/const-string/src/ConstStringBenchmark.java new file mode 100644 index 0000000..2359a5f --- /dev/null +++ b/benchmark/const-string/src/ConstStringBenchmark.java @@ -0,0 +1,1067 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class ConstStringBenchmark { + // Initialize 1025 strings with consecutive string indexes in the dex file. + // The tests below rely on the knowledge that ART uses the low 10 bits + // of the string index as the hash into DexCache strings array. + // Note: n == n + 1024 (mod 2^10), n + 1 != n + 1023 (mod 2^10). 
+ public static final String string_0000 = "TestString_0000"; + public static final String string_0001 = "TestString_0001"; + public static final String string_0002 = "TestString_0002"; + public static final String string_0003 = "TestString_0003"; + public static final String string_0004 = "TestString_0004"; + public static final String string_0005 = "TestString_0005"; + public static final String string_0006 = "TestString_0006"; + public static final String string_0007 = "TestString_0007"; + public static final String string_0008 = "TestString_0008"; + public static final String string_0009 = "TestString_0009"; + public static final String string_0010 = "TestString_0010"; + public static final String string_0011 = "TestString_0011"; + public static final String string_0012 = "TestString_0012"; + public static final String string_0013 = "TestString_0013"; + public static final String string_0014 = "TestString_0014"; + public static final String string_0015 = "TestString_0015"; + public static final String string_0016 = "TestString_0016"; + public static final String string_0017 = "TestString_0017"; + public static final String string_0018 = "TestString_0018"; + public static final String string_0019 = "TestString_0019"; + public static final String string_0020 = "TestString_0020"; + public static final String string_0021 = "TestString_0021"; + public static final String string_0022 = "TestString_0022"; + public static final String string_0023 = "TestString_0023"; + public static final String string_0024 = "TestString_0024"; + public static final String string_0025 = "TestString_0025"; + public static final String string_0026 = "TestString_0026"; + public static final String string_0027 = "TestString_0027"; + public static final String string_0028 = "TestString_0028"; + public static final String string_0029 = "TestString_0029"; + public static final String string_0030 = "TestString_0030"; + public static final String string_0031 = "TestString_0031"; + public static 
final String string_0032 = "TestString_0032"; + public static final String string_0033 = "TestString_0033"; + public static final String string_0034 = "TestString_0034"; + public static final String string_0035 = "TestString_0035"; + public static final String string_0036 = "TestString_0036"; + public static final String string_0037 = "TestString_0037"; + public static final String string_0038 = "TestString_0038"; + public static final String string_0039 = "TestString_0039"; + public static final String string_0040 = "TestString_0040"; + public static final String string_0041 = "TestString_0041"; + public static final String string_0042 = "TestString_0042"; + public static final String string_0043 = "TestString_0043"; + public static final String string_0044 = "TestString_0044"; + public static final String string_0045 = "TestString_0045"; + public static final String string_0046 = "TestString_0046"; + public static final String string_0047 = "TestString_0047"; + public static final String string_0048 = "TestString_0048"; + public static final String string_0049 = "TestString_0049"; + public static final String string_0050 = "TestString_0050"; + public static final String string_0051 = "TestString_0051"; + public static final String string_0052 = "TestString_0052"; + public static final String string_0053 = "TestString_0053"; + public static final String string_0054 = "TestString_0054"; + public static final String string_0055 = "TestString_0055"; + public static final String string_0056 = "TestString_0056"; + public static final String string_0057 = "TestString_0057"; + public static final String string_0058 = "TestString_0058"; + public static final String string_0059 = "TestString_0059"; + public static final String string_0060 = "TestString_0060"; + public static final String string_0061 = "TestString_0061"; + public static final String string_0062 = "TestString_0062"; + public static final String string_0063 = "TestString_0063"; + public static final String 
string_0064 = "TestString_0064"; + public static final String string_0065 = "TestString_0065"; + public static final String string_0066 = "TestString_0066"; + public static final String string_0067 = "TestString_0067"; + public static final String string_0068 = "TestString_0068"; + public static final String string_0069 = "TestString_0069"; + public static final String string_0070 = "TestString_0070"; + public static final String string_0071 = "TestString_0071"; + public static final String string_0072 = "TestString_0072"; + public static final String string_0073 = "TestString_0073"; + public static final String string_0074 = "TestString_0074"; + public static final String string_0075 = "TestString_0075"; + public static final String string_0076 = "TestString_0076"; + public static final String string_0077 = "TestString_0077"; + public static final String string_0078 = "TestString_0078"; + public static final String string_0079 = "TestString_0079"; + public static final String string_0080 = "TestString_0080"; + public static final String string_0081 = "TestString_0081"; + public static final String string_0082 = "TestString_0082"; + public static final String string_0083 = "TestString_0083"; + public static final String string_0084 = "TestString_0084"; + public static final String string_0085 = "TestString_0085"; + public static final String string_0086 = "TestString_0086"; + public static final String string_0087 = "TestString_0087"; + public static final String string_0088 = "TestString_0088"; + public static final String string_0089 = "TestString_0089"; + public static final String string_0090 = "TestString_0090"; + public static final String string_0091 = "TestString_0091"; + public static final String string_0092 = "TestString_0092"; + public static final String string_0093 = "TestString_0093"; + public static final String string_0094 = "TestString_0094"; + public static final String string_0095 = "TestString_0095"; + public static final String string_0096 = 
"TestString_0096"; + public static final String string_0097 = "TestString_0097"; + public static final String string_0098 = "TestString_0098"; + public static final String string_0099 = "TestString_0099"; + public static final String string_0100 = "TestString_0100"; + public static final String string_0101 = "TestString_0101"; + public static final String string_0102 = "TestString_0102"; + public static final String string_0103 = "TestString_0103"; + public static final String string_0104 = "TestString_0104"; + public static final String string_0105 = "TestString_0105"; + public static final String string_0106 = "TestString_0106"; + public static final String string_0107 = "TestString_0107"; + public static final String string_0108 = "TestString_0108"; + public static final String string_0109 = "TestString_0109"; + public static final String string_0110 = "TestString_0110"; + public static final String string_0111 = "TestString_0111"; + public static final String string_0112 = "TestString_0112"; + public static final String string_0113 = "TestString_0113"; + public static final String string_0114 = "TestString_0114"; + public static final String string_0115 = "TestString_0115"; + public static final String string_0116 = "TestString_0116"; + public static final String string_0117 = "TestString_0117"; + public static final String string_0118 = "TestString_0118"; + public static final String string_0119 = "TestString_0119"; + public static final String string_0120 = "TestString_0120"; + public static final String string_0121 = "TestString_0121"; + public static final String string_0122 = "TestString_0122"; + public static final String string_0123 = "TestString_0123"; + public static final String string_0124 = "TestString_0124"; + public static final String string_0125 = "TestString_0125"; + public static final String string_0126 = "TestString_0126"; + public static final String string_0127 = "TestString_0127"; + public static final String string_0128 = 
"TestString_0128"; + public static final String string_0129 = "TestString_0129"; + public static final String string_0130 = "TestString_0130"; + public static final String string_0131 = "TestString_0131"; + public static final String string_0132 = "TestString_0132"; + public static final String string_0133 = "TestString_0133"; + public static final String string_0134 = "TestString_0134"; + public static final String string_0135 = "TestString_0135"; + public static final String string_0136 = "TestString_0136"; + public static final String string_0137 = "TestString_0137"; + public static final String string_0138 = "TestString_0138"; + public static final String string_0139 = "TestString_0139"; + public static final String string_0140 = "TestString_0140"; + public static final String string_0141 = "TestString_0141"; + public static final String string_0142 = "TestString_0142"; + public static final String string_0143 = "TestString_0143"; + public static final String string_0144 = "TestString_0144"; + public static final String string_0145 = "TestString_0145"; + public static final String string_0146 = "TestString_0146"; + public static final String string_0147 = "TestString_0147"; + public static final String string_0148 = "TestString_0148"; + public static final String string_0149 = "TestString_0149"; + public static final String string_0150 = "TestString_0150"; + public static final String string_0151 = "TestString_0151"; + public static final String string_0152 = "TestString_0152"; + public static final String string_0153 = "TestString_0153"; + public static final String string_0154 = "TestString_0154"; + public static final String string_0155 = "TestString_0155"; + public static final String string_0156 = "TestString_0156"; + public static final String string_0157 = "TestString_0157"; + public static final String string_0158 = "TestString_0158"; + public static final String string_0159 = "TestString_0159"; + public static final String string_0160 = 
"TestString_0160"; + public static final String string_0161 = "TestString_0161"; + public static final String string_0162 = "TestString_0162"; + public static final String string_0163 = "TestString_0163"; + public static final String string_0164 = "TestString_0164"; + public static final String string_0165 = "TestString_0165"; + public static final String string_0166 = "TestString_0166"; + public static final String string_0167 = "TestString_0167"; + public static final String string_0168 = "TestString_0168"; + public static final String string_0169 = "TestString_0169"; + public static final String string_0170 = "TestString_0170"; + public static final String string_0171 = "TestString_0171"; + public static final String string_0172 = "TestString_0172"; + public static final String string_0173 = "TestString_0173"; + public static final String string_0174 = "TestString_0174"; + public static final String string_0175 = "TestString_0175"; + public static final String string_0176 = "TestString_0176"; + public static final String string_0177 = "TestString_0177"; + public static final String string_0178 = "TestString_0178"; + public static final String string_0179 = "TestString_0179"; + public static final String string_0180 = "TestString_0180"; + public static final String string_0181 = "TestString_0181"; + public static final String string_0182 = "TestString_0182"; + public static final String string_0183 = "TestString_0183"; + public static final String string_0184 = "TestString_0184"; + public static final String string_0185 = "TestString_0185"; + public static final String string_0186 = "TestString_0186"; + public static final String string_0187 = "TestString_0187"; + public static final String string_0188 = "TestString_0188"; + public static final String string_0189 = "TestString_0189"; + public static final String string_0190 = "TestString_0190"; + public static final String string_0191 = "TestString_0191"; + public static final String string_0192 = 
"TestString_0192"; + public static final String string_0193 = "TestString_0193"; + public static final String string_0194 = "TestString_0194"; + public static final String string_0195 = "TestString_0195"; + public static final String string_0196 = "TestString_0196"; + public static final String string_0197 = "TestString_0197"; + public static final String string_0198 = "TestString_0198"; + public static final String string_0199 = "TestString_0199"; + public static final String string_0200 = "TestString_0200"; + public static final String string_0201 = "TestString_0201"; + public static final String string_0202 = "TestString_0202"; + public static final String string_0203 = "TestString_0203"; + public static final String string_0204 = "TestString_0204"; + public static final String string_0205 = "TestString_0205"; + public static final String string_0206 = "TestString_0206"; + public static final String string_0207 = "TestString_0207"; + public static final String string_0208 = "TestString_0208"; + public static final String string_0209 = "TestString_0209"; + public static final String string_0210 = "TestString_0210"; + public static final String string_0211 = "TestString_0211"; + public static final String string_0212 = "TestString_0212"; + public static final String string_0213 = "TestString_0213"; + public static final String string_0214 = "TestString_0214"; + public static final String string_0215 = "TestString_0215"; + public static final String string_0216 = "TestString_0216"; + public static final String string_0217 = "TestString_0217"; + public static final String string_0218 = "TestString_0218"; + public static final String string_0219 = "TestString_0219"; + public static final String string_0220 = "TestString_0220"; + public static final String string_0221 = "TestString_0221"; + public static final String string_0222 = "TestString_0222"; + public static final String string_0223 = "TestString_0223"; + public static final String string_0224 = 
"TestString_0224"; + public static final String string_0225 = "TestString_0225"; + public static final String string_0226 = "TestString_0226"; + public static final String string_0227 = "TestString_0227"; + public static final String string_0228 = "TestString_0228"; + public static final String string_0229 = "TestString_0229"; + public static final String string_0230 = "TestString_0230"; + public static final String string_0231 = "TestString_0231"; + public static final String string_0232 = "TestString_0232"; + public static final String string_0233 = "TestString_0233"; + public static final String string_0234 = "TestString_0234"; + public static final String string_0235 = "TestString_0235"; + public static final String string_0236 = "TestString_0236"; + public static final String string_0237 = "TestString_0237"; + public static final String string_0238 = "TestString_0238"; + public static final String string_0239 = "TestString_0239"; + public static final String string_0240 = "TestString_0240"; + public static final String string_0241 = "TestString_0241"; + public static final String string_0242 = "TestString_0242"; + public static final String string_0243 = "TestString_0243"; + public static final String string_0244 = "TestString_0244"; + public static final String string_0245 = "TestString_0245"; + public static final String string_0246 = "TestString_0246"; + public static final String string_0247 = "TestString_0247"; + public static final String string_0248 = "TestString_0248"; + public static final String string_0249 = "TestString_0249"; + public static final String string_0250 = "TestString_0250"; + public static final String string_0251 = "TestString_0251"; + public static final String string_0252 = "TestString_0252"; + public static final String string_0253 = "TestString_0253"; + public static final String string_0254 = "TestString_0254"; + public static final String string_0255 = "TestString_0255"; + public static final String string_0256 = 
"TestString_0256"; + public static final String string_0257 = "TestString_0257"; + public static final String string_0258 = "TestString_0258"; + public static final String string_0259 = "TestString_0259"; + public static final String string_0260 = "TestString_0260"; + public static final String string_0261 = "TestString_0261"; + public static final String string_0262 = "TestString_0262"; + public static final String string_0263 = "TestString_0263"; + public static final String string_0264 = "TestString_0264"; + public static final String string_0265 = "TestString_0265"; + public static final String string_0266 = "TestString_0266"; + public static final String string_0267 = "TestString_0267"; + public static final String string_0268 = "TestString_0268"; + public static final String string_0269 = "TestString_0269"; + public static final String string_0270 = "TestString_0270"; + public static final String string_0271 = "TestString_0271"; + public static final String string_0272 = "TestString_0272"; + public static final String string_0273 = "TestString_0273"; + public static final String string_0274 = "TestString_0274"; + public static final String string_0275 = "TestString_0275"; + public static final String string_0276 = "TestString_0276"; + public static final String string_0277 = "TestString_0277"; + public static final String string_0278 = "TestString_0278"; + public static final String string_0279 = "TestString_0279"; + public static final String string_0280 = "TestString_0280"; + public static final String string_0281 = "TestString_0281"; + public static final String string_0282 = "TestString_0282"; + public static final String string_0283 = "TestString_0283"; + public static final String string_0284 = "TestString_0284"; + public static final String string_0285 = "TestString_0285"; + public static final String string_0286 = "TestString_0286"; + public static final String string_0287 = "TestString_0287"; + public static final String string_0288 = 
"TestString_0288"; + public static final String string_0289 = "TestString_0289"; + public static final String string_0290 = "TestString_0290"; + public static final String string_0291 = "TestString_0291"; + public static final String string_0292 = "TestString_0292"; + public static final String string_0293 = "TestString_0293"; + public static final String string_0294 = "TestString_0294"; + public static final String string_0295 = "TestString_0295"; + public static final String string_0296 = "TestString_0296"; + public static final String string_0297 = "TestString_0297"; + public static final String string_0298 = "TestString_0298"; + public static final String string_0299 = "TestString_0299"; + public static final String string_0300 = "TestString_0300"; + public static final String string_0301 = "TestString_0301"; + public static final String string_0302 = "TestString_0302"; + public static final String string_0303 = "TestString_0303"; + public static final String string_0304 = "TestString_0304"; + public static final String string_0305 = "TestString_0305"; + public static final String string_0306 = "TestString_0306"; + public static final String string_0307 = "TestString_0307"; + public static final String string_0308 = "TestString_0308"; + public static final String string_0309 = "TestString_0309"; + public static final String string_0310 = "TestString_0310"; + public static final String string_0311 = "TestString_0311"; + public static final String string_0312 = "TestString_0312"; + public static final String string_0313 = "TestString_0313"; + public static final String string_0314 = "TestString_0314"; + public static final String string_0315 = "TestString_0315"; + public static final String string_0316 = "TestString_0316"; + public static final String string_0317 = "TestString_0317"; + public static final String string_0318 = "TestString_0318"; + public static final String string_0319 = "TestString_0319"; + public static final String string_0320 = 
"TestString_0320"; + public static final String string_0321 = "TestString_0321"; + public static final String string_0322 = "TestString_0322"; + public static final String string_0323 = "TestString_0323"; + public static final String string_0324 = "TestString_0324"; + public static final String string_0325 = "TestString_0325"; + public static final String string_0326 = "TestString_0326"; + public static final String string_0327 = "TestString_0327"; + public static final String string_0328 = "TestString_0328"; + public static final String string_0329 = "TestString_0329"; + public static final String string_0330 = "TestString_0330"; + public static final String string_0331 = "TestString_0331"; + public static final String string_0332 = "TestString_0332"; + public static final String string_0333 = "TestString_0333"; + public static final String string_0334 = "TestString_0334"; + public static final String string_0335 = "TestString_0335"; + public static final String string_0336 = "TestString_0336"; + public static final String string_0337 = "TestString_0337"; + public static final String string_0338 = "TestString_0338"; + public static final String string_0339 = "TestString_0339"; + public static final String string_0340 = "TestString_0340"; + public static final String string_0341 = "TestString_0341"; + public static final String string_0342 = "TestString_0342"; + public static final String string_0343 = "TestString_0343"; + public static final String string_0344 = "TestString_0344"; + public static final String string_0345 = "TestString_0345"; + public static final String string_0346 = "TestString_0346"; + public static final String string_0347 = "TestString_0347"; + public static final String string_0348 = "TestString_0348"; + public static final String string_0349 = "TestString_0349"; + public static final String string_0350 = "TestString_0350"; + public static final String string_0351 = "TestString_0351"; + public static final String string_0352 = 
"TestString_0352"; + public static final String string_0353 = "TestString_0353"; + public static final String string_0354 = "TestString_0354"; + public static final String string_0355 = "TestString_0355"; + public static final String string_0356 = "TestString_0356"; + public static final String string_0357 = "TestString_0357"; + public static final String string_0358 = "TestString_0358"; + public static final String string_0359 = "TestString_0359"; + public static final String string_0360 = "TestString_0360"; + public static final String string_0361 = "TestString_0361"; + public static final String string_0362 = "TestString_0362"; + public static final String string_0363 = "TestString_0363"; + public static final String string_0364 = "TestString_0364"; + public static final String string_0365 = "TestString_0365"; + public static final String string_0366 = "TestString_0366"; + public static final String string_0367 = "TestString_0367"; + public static final String string_0368 = "TestString_0368"; + public static final String string_0369 = "TestString_0369"; + public static final String string_0370 = "TestString_0370"; + public static final String string_0371 = "TestString_0371"; + public static final String string_0372 = "TestString_0372"; + public static final String string_0373 = "TestString_0373"; + public static final String string_0374 = "TestString_0374"; + public static final String string_0375 = "TestString_0375"; + public static final String string_0376 = "TestString_0376"; + public static final String string_0377 = "TestString_0377"; + public static final String string_0378 = "TestString_0378"; + public static final String string_0379 = "TestString_0379"; + public static final String string_0380 = "TestString_0380"; + public static final String string_0381 = "TestString_0381"; + public static final String string_0382 = "TestString_0382"; + public static final String string_0383 = "TestString_0383"; + public static final String string_0384 = 
"TestString_0384"; + public static final String string_0385 = "TestString_0385"; + public static final String string_0386 = "TestString_0386"; + public static final String string_0387 = "TestString_0387"; + public static final String string_0388 = "TestString_0388"; + public static final String string_0389 = "TestString_0389"; + public static final String string_0390 = "TestString_0390"; + public static final String string_0391 = "TestString_0391"; + public static final String string_0392 = "TestString_0392"; + public static final String string_0393 = "TestString_0393"; + public static final String string_0394 = "TestString_0394"; + public static final String string_0395 = "TestString_0395"; + public static final String string_0396 = "TestString_0396"; + public static final String string_0397 = "TestString_0397"; + public static final String string_0398 = "TestString_0398"; + public static final String string_0399 = "TestString_0399"; + public static final String string_0400 = "TestString_0400"; + public static final String string_0401 = "TestString_0401"; + public static final String string_0402 = "TestString_0402"; + public static final String string_0403 = "TestString_0403"; + public static final String string_0404 = "TestString_0404"; + public static final String string_0405 = "TestString_0405"; + public static final String string_0406 = "TestString_0406"; + public static final String string_0407 = "TestString_0407"; + public static final String string_0408 = "TestString_0408"; + public static final String string_0409 = "TestString_0409"; + public static final String string_0410 = "TestString_0410"; + public static final String string_0411 = "TestString_0411"; + public static final String string_0412 = "TestString_0412"; + public static final String string_0413 = "TestString_0413"; + public static final String string_0414 = "TestString_0414"; + public static final String string_0415 = "TestString_0415"; + public static final String string_0416 = 
"TestString_0416"; + public static final String string_0417 = "TestString_0417"; + public static final String string_0418 = "TestString_0418"; + public static final String string_0419 = "TestString_0419"; + public static final String string_0420 = "TestString_0420"; + public static final String string_0421 = "TestString_0421"; + public static final String string_0422 = "TestString_0422"; + public static final String string_0423 = "TestString_0423"; + public static final String string_0424 = "TestString_0424"; + public static final String string_0425 = "TestString_0425"; + public static final String string_0426 = "TestString_0426"; + public static final String string_0427 = "TestString_0427"; + public static final String string_0428 = "TestString_0428"; + public static final String string_0429 = "TestString_0429"; + public static final String string_0430 = "TestString_0430"; + public static final String string_0431 = "TestString_0431"; + public static final String string_0432 = "TestString_0432"; + public static final String string_0433 = "TestString_0433"; + public static final String string_0434 = "TestString_0434"; + public static final String string_0435 = "TestString_0435"; + public static final String string_0436 = "TestString_0436"; + public static final String string_0437 = "TestString_0437"; + public static final String string_0438 = "TestString_0438"; + public static final String string_0439 = "TestString_0439"; + public static final String string_0440 = "TestString_0440"; + public static final String string_0441 = "TestString_0441"; + public static final String string_0442 = "TestString_0442"; + public static final String string_0443 = "TestString_0443"; + public static final String string_0444 = "TestString_0444"; + public static final String string_0445 = "TestString_0445"; + public static final String string_0446 = "TestString_0446"; + public static final String string_0447 = "TestString_0447"; + public static final String string_0448 = 
"TestString_0448"; + public static final String string_0449 = "TestString_0449"; + public static final String string_0450 = "TestString_0450"; + public static final String string_0451 = "TestString_0451"; + public static final String string_0452 = "TestString_0452"; + public static final String string_0453 = "TestString_0453"; + public static final String string_0454 = "TestString_0454"; + public static final String string_0455 = "TestString_0455"; + public static final String string_0456 = "TestString_0456"; + public static final String string_0457 = "TestString_0457"; + public static final String string_0458 = "TestString_0458"; + public static final String string_0459 = "TestString_0459"; + public static final String string_0460 = "TestString_0460"; + public static final String string_0461 = "TestString_0461"; + public static final String string_0462 = "TestString_0462"; + public static final String string_0463 = "TestString_0463"; + public static final String string_0464 = "TestString_0464"; + public static final String string_0465 = "TestString_0465"; + public static final String string_0466 = "TestString_0466"; + public static final String string_0467 = "TestString_0467"; + public static final String string_0468 = "TestString_0468"; + public static final String string_0469 = "TestString_0469"; + public static final String string_0470 = "TestString_0470"; + public static final String string_0471 = "TestString_0471"; + public static final String string_0472 = "TestString_0472"; + public static final String string_0473 = "TestString_0473"; + public static final String string_0474 = "TestString_0474"; + public static final String string_0475 = "TestString_0475"; + public static final String string_0476 = "TestString_0476"; + public static final String string_0477 = "TestString_0477"; + public static final String string_0478 = "TestString_0478"; + public static final String string_0479 = "TestString_0479"; + public static final String string_0480 = 
"TestString_0480"; + public static final String string_0481 = "TestString_0481"; + public static final String string_0482 = "TestString_0482"; + public static final String string_0483 = "TestString_0483"; + public static final String string_0484 = "TestString_0484"; + public static final String string_0485 = "TestString_0485"; + public static final String string_0486 = "TestString_0486"; + public static final String string_0487 = "TestString_0487"; + public static final String string_0488 = "TestString_0488"; + public static final String string_0489 = "TestString_0489"; + public static final String string_0490 = "TestString_0490"; + public static final String string_0491 = "TestString_0491"; + public static final String string_0492 = "TestString_0492"; + public static final String string_0493 = "TestString_0493"; + public static final String string_0494 = "TestString_0494"; + public static final String string_0495 = "TestString_0495"; + public static final String string_0496 = "TestString_0496"; + public static final String string_0497 = "TestString_0497"; + public static final String string_0498 = "TestString_0498"; + public static final String string_0499 = "TestString_0499"; + public static final String string_0500 = "TestString_0500"; + public static final String string_0501 = "TestString_0501"; + public static final String string_0502 = "TestString_0502"; + public static final String string_0503 = "TestString_0503"; + public static final String string_0504 = "TestString_0504"; + public static final String string_0505 = "TestString_0505"; + public static final String string_0506 = "TestString_0506"; + public static final String string_0507 = "TestString_0507"; + public static final String string_0508 = "TestString_0508"; + public static final String string_0509 = "TestString_0509"; + public static final String string_0510 = "TestString_0510"; + public static final String string_0511 = "TestString_0511"; + public static final String string_0512 = 
"TestString_0512"; + public static final String string_0513 = "TestString_0513"; + public static final String string_0514 = "TestString_0514"; + public static final String string_0515 = "TestString_0515"; + public static final String string_0516 = "TestString_0516"; + public static final String string_0517 = "TestString_0517"; + public static final String string_0518 = "TestString_0518"; + public static final String string_0519 = "TestString_0519"; + public static final String string_0520 = "TestString_0520"; + public static final String string_0521 = "TestString_0521"; + public static final String string_0522 = "TestString_0522"; + public static final String string_0523 = "TestString_0523"; + public static final String string_0524 = "TestString_0524"; + public static final String string_0525 = "TestString_0525"; + public static final String string_0526 = "TestString_0526"; + public static final String string_0527 = "TestString_0527"; + public static final String string_0528 = "TestString_0528"; + public static final String string_0529 = "TestString_0529"; + public static final String string_0530 = "TestString_0530"; + public static final String string_0531 = "TestString_0531"; + public static final String string_0532 = "TestString_0532"; + public static final String string_0533 = "TestString_0533"; + public static final String string_0534 = "TestString_0534"; + public static final String string_0535 = "TestString_0535"; + public static final String string_0536 = "TestString_0536"; + public static final String string_0537 = "TestString_0537"; + public static final String string_0538 = "TestString_0538"; + public static final String string_0539 = "TestString_0539"; + public static final String string_0540 = "TestString_0540"; + public static final String string_0541 = "TestString_0541"; + public static final String string_0542 = "TestString_0542"; + public static final String string_0543 = "TestString_0543"; + public static final String string_0544 = 
"TestString_0544"; + public static final String string_0545 = "TestString_0545"; + public static final String string_0546 = "TestString_0546"; + public static final String string_0547 = "TestString_0547"; + public static final String string_0548 = "TestString_0548"; + public static final String string_0549 = "TestString_0549"; + public static final String string_0550 = "TestString_0550"; + public static final String string_0551 = "TestString_0551"; + public static final String string_0552 = "TestString_0552"; + public static final String string_0553 = "TestString_0553"; + public static final String string_0554 = "TestString_0554"; + public static final String string_0555 = "TestString_0555"; + public static final String string_0556 = "TestString_0556"; + public static final String string_0557 = "TestString_0557"; + public static final String string_0558 = "TestString_0558"; + public static final String string_0559 = "TestString_0559"; + public static final String string_0560 = "TestString_0560"; + public static final String string_0561 = "TestString_0561"; + public static final String string_0562 = "TestString_0562"; + public static final String string_0563 = "TestString_0563"; + public static final String string_0564 = "TestString_0564"; + public static final String string_0565 = "TestString_0565"; + public static final String string_0566 = "TestString_0566"; + public static final String string_0567 = "TestString_0567"; + public static final String string_0568 = "TestString_0568"; + public static final String string_0569 = "TestString_0569"; + public static final String string_0570 = "TestString_0570"; + public static final String string_0571 = "TestString_0571"; + public static final String string_0572 = "TestString_0572"; + public static final String string_0573 = "TestString_0573"; + public static final String string_0574 = "TestString_0574"; + public static final String string_0575 = "TestString_0575"; + public static final String string_0576 = 
"TestString_0576"; + public static final String string_0577 = "TestString_0577"; + public static final String string_0578 = "TestString_0578"; + public static final String string_0579 = "TestString_0579"; + public static final String string_0580 = "TestString_0580"; + public static final String string_0581 = "TestString_0581"; + public static final String string_0582 = "TestString_0582"; + public static final String string_0583 = "TestString_0583"; + public static final String string_0584 = "TestString_0584"; + public static final String string_0585 = "TestString_0585"; + public static final String string_0586 = "TestString_0586"; + public static final String string_0587 = "TestString_0587"; + public static final String string_0588 = "TestString_0588"; + public static final String string_0589 = "TestString_0589"; + public static final String string_0590 = "TestString_0590"; + public static final String string_0591 = "TestString_0591"; + public static final String string_0592 = "TestString_0592"; + public static final String string_0593 = "TestString_0593"; + public static final String string_0594 = "TestString_0594"; + public static final String string_0595 = "TestString_0595"; + public static final String string_0596 = "TestString_0596"; + public static final String string_0597 = "TestString_0597"; + public static final String string_0598 = "TestString_0598"; + public static final String string_0599 = "TestString_0599"; + public static final String string_0600 = "TestString_0600"; + public static final String string_0601 = "TestString_0601"; + public static final String string_0602 = "TestString_0602"; + public static final String string_0603 = "TestString_0603"; + public static final String string_0604 = "TestString_0604"; + public static final String string_0605 = "TestString_0605"; + public static final String string_0606 = "TestString_0606"; + public static final String string_0607 = "TestString_0607"; + public static final String string_0608 = 
"TestString_0608"; + public static final String string_0609 = "TestString_0609"; + public static final String string_0610 = "TestString_0610"; + public static final String string_0611 = "TestString_0611"; + public static final String string_0612 = "TestString_0612"; + public static final String string_0613 = "TestString_0613"; + public static final String string_0614 = "TestString_0614"; + public static final String string_0615 = "TestString_0615"; + public static final String string_0616 = "TestString_0616"; + public static final String string_0617 = "TestString_0617"; + public static final String string_0618 = "TestString_0618"; + public static final String string_0619 = "TestString_0619"; + public static final String string_0620 = "TestString_0620"; + public static final String string_0621 = "TestString_0621"; + public static final String string_0622 = "TestString_0622"; + public static final String string_0623 = "TestString_0623"; + public static final String string_0624 = "TestString_0624"; + public static final String string_0625 = "TestString_0625"; + public static final String string_0626 = "TestString_0626"; + public static final String string_0627 = "TestString_0627"; + public static final String string_0628 = "TestString_0628"; + public static final String string_0629 = "TestString_0629"; + public static final String string_0630 = "TestString_0630"; + public static final String string_0631 = "TestString_0631"; + public static final String string_0632 = "TestString_0632"; + public static final String string_0633 = "TestString_0633"; + public static final String string_0634 = "TestString_0634"; + public static final String string_0635 = "TestString_0635"; + public static final String string_0636 = "TestString_0636"; + public static final String string_0637 = "TestString_0637"; + public static final String string_0638 = "TestString_0638"; + public static final String string_0639 = "TestString_0639"; + public static final String string_0640 = 
"TestString_0640"; + public static final String string_0641 = "TestString_0641"; + public static final String string_0642 = "TestString_0642"; + public static final String string_0643 = "TestString_0643"; + public static final String string_0644 = "TestString_0644"; + public static final String string_0645 = "TestString_0645"; + public static final String string_0646 = "TestString_0646"; + public static final String string_0647 = "TestString_0647"; + public static final String string_0648 = "TestString_0648"; + public static final String string_0649 = "TestString_0649"; + public static final String string_0650 = "TestString_0650"; + public static final String string_0651 = "TestString_0651"; + public static final String string_0652 = "TestString_0652"; + public static final String string_0653 = "TestString_0653"; + public static final String string_0654 = "TestString_0654"; + public static final String string_0655 = "TestString_0655"; + public static final String string_0656 = "TestString_0656"; + public static final String string_0657 = "TestString_0657"; + public static final String string_0658 = "TestString_0658"; + public static final String string_0659 = "TestString_0659"; + public static final String string_0660 = "TestString_0660"; + public static final String string_0661 = "TestString_0661"; + public static final String string_0662 = "TestString_0662"; + public static final String string_0663 = "TestString_0663"; + public static final String string_0664 = "TestString_0664"; + public static final String string_0665 = "TestString_0665"; + public static final String string_0666 = "TestString_0666"; + public static final String string_0667 = "TestString_0667"; + public static final String string_0668 = "TestString_0668"; + public static final String string_0669 = "TestString_0669"; + public static final String string_0670 = "TestString_0670"; + public static final String string_0671 = "TestString_0671"; + public static final String string_0672 = 
"TestString_0672"; + public static final String string_0673 = "TestString_0673"; + public static final String string_0674 = "TestString_0674"; + public static final String string_0675 = "TestString_0675"; + public static final String string_0676 = "TestString_0676"; + public static final String string_0677 = "TestString_0677"; + public static final String string_0678 = "TestString_0678"; + public static final String string_0679 = "TestString_0679"; + public static final String string_0680 = "TestString_0680"; + public static final String string_0681 = "TestString_0681"; + public static final String string_0682 = "TestString_0682"; + public static final String string_0683 = "TestString_0683"; + public static final String string_0684 = "TestString_0684"; + public static final String string_0685 = "TestString_0685"; + public static final String string_0686 = "TestString_0686"; + public static final String string_0687 = "TestString_0687"; + public static final String string_0688 = "TestString_0688"; + public static final String string_0689 = "TestString_0689"; + public static final String string_0690 = "TestString_0690"; + public static final String string_0691 = "TestString_0691"; + public static final String string_0692 = "TestString_0692"; + public static final String string_0693 = "TestString_0693"; + public static final String string_0694 = "TestString_0694"; + public static final String string_0695 = "TestString_0695"; + public static final String string_0696 = "TestString_0696"; + public static final String string_0697 = "TestString_0697"; + public static final String string_0698 = "TestString_0698"; + public static final String string_0699 = "TestString_0699"; + public static final String string_0700 = "TestString_0700"; + public static final String string_0701 = "TestString_0701"; + public static final String string_0702 = "TestString_0702"; + public static final String string_0703 = "TestString_0703"; + public static final String string_0704 = 
"TestString_0704"; + public static final String string_0705 = "TestString_0705"; + public static final String string_0706 = "TestString_0706"; + public static final String string_0707 = "TestString_0707"; + public static final String string_0708 = "TestString_0708"; + public static final String string_0709 = "TestString_0709"; + public static final String string_0710 = "TestString_0710"; + public static final String string_0711 = "TestString_0711"; + public static final String string_0712 = "TestString_0712"; + public static final String string_0713 = "TestString_0713"; + public static final String string_0714 = "TestString_0714"; + public static final String string_0715 = "TestString_0715"; + public static final String string_0716 = "TestString_0716"; + public static final String string_0717 = "TestString_0717"; + public static final String string_0718 = "TestString_0718"; + public static final String string_0719 = "TestString_0719"; + public static final String string_0720 = "TestString_0720"; + public static final String string_0721 = "TestString_0721"; + public static final String string_0722 = "TestString_0722"; + public static final String string_0723 = "TestString_0723"; + public static final String string_0724 = "TestString_0724"; + public static final String string_0725 = "TestString_0725"; + public static final String string_0726 = "TestString_0726"; + public static final String string_0727 = "TestString_0727"; + public static final String string_0728 = "TestString_0728"; + public static final String string_0729 = "TestString_0729"; + public static final String string_0730 = "TestString_0730"; + public static final String string_0731 = "TestString_0731"; + public static final String string_0732 = "TestString_0732"; + public static final String string_0733 = "TestString_0733"; + public static final String string_0734 = "TestString_0734"; + public static final String string_0735 = "TestString_0735"; + public static final String string_0736 = 
"TestString_0736"; + public static final String string_0737 = "TestString_0737"; + public static final String string_0738 = "TestString_0738"; + public static final String string_0739 = "TestString_0739"; + public static final String string_0740 = "TestString_0740"; + public static final String string_0741 = "TestString_0741"; + public static final String string_0742 = "TestString_0742"; + public static final String string_0743 = "TestString_0743"; + public static final String string_0744 = "TestString_0744"; + public static final String string_0745 = "TestString_0745"; + public static final String string_0746 = "TestString_0746"; + public static final String string_0747 = "TestString_0747"; + public static final String string_0748 = "TestString_0748"; + public static final String string_0749 = "TestString_0749"; + public static final String string_0750 = "TestString_0750"; + public static final String string_0751 = "TestString_0751"; + public static final String string_0752 = "TestString_0752"; + public static final String string_0753 = "TestString_0753"; + public static final String string_0754 = "TestString_0754"; + public static final String string_0755 = "TestString_0755"; + public static final String string_0756 = "TestString_0756"; + public static final String string_0757 = "TestString_0757"; + public static final String string_0758 = "TestString_0758"; + public static final String string_0759 = "TestString_0759"; + public static final String string_0760 = "TestString_0760"; + public static final String string_0761 = "TestString_0761"; + public static final String string_0762 = "TestString_0762"; + public static final String string_0763 = "TestString_0763"; + public static final String string_0764 = "TestString_0764"; + public static final String string_0765 = "TestString_0765"; + public static final String string_0766 = "TestString_0766"; + public static final String string_0767 = "TestString_0767"; + public static final String string_0768 = 
"TestString_0768"; + public static final String string_0769 = "TestString_0769"; + public static final String string_0770 = "TestString_0770"; + public static final String string_0771 = "TestString_0771"; + public static final String string_0772 = "TestString_0772"; + public static final String string_0773 = "TestString_0773"; + public static final String string_0774 = "TestString_0774"; + public static final String string_0775 = "TestString_0775"; + public static final String string_0776 = "TestString_0776"; + public static final String string_0777 = "TestString_0777"; + public static final String string_0778 = "TestString_0778"; + public static final String string_0779 = "TestString_0779"; + public static final String string_0780 = "TestString_0780"; + public static final String string_0781 = "TestString_0781"; + public static final String string_0782 = "TestString_0782"; + public static final String string_0783 = "TestString_0783"; + public static final String string_0784 = "TestString_0784"; + public static final String string_0785 = "TestString_0785"; + public static final String string_0786 = "TestString_0786"; + public static final String string_0787 = "TestString_0787"; + public static final String string_0788 = "TestString_0788"; + public static final String string_0789 = "TestString_0789"; + public static final String string_0790 = "TestString_0790"; + public static final String string_0791 = "TestString_0791"; + public static final String string_0792 = "TestString_0792"; + public static final String string_0793 = "TestString_0793"; + public static final String string_0794 = "TestString_0794"; + public static final String string_0795 = "TestString_0795"; + public static final String string_0796 = "TestString_0796"; + public static final String string_0797 = "TestString_0797"; + public static final String string_0798 = "TestString_0798"; + public static final String string_0799 = "TestString_0799"; + public static final String string_0800 = 
"TestString_0800"; + public static final String string_0801 = "TestString_0801"; + public static final String string_0802 = "TestString_0802"; + public static final String string_0803 = "TestString_0803"; + public static final String string_0804 = "TestString_0804"; + public static final String string_0805 = "TestString_0805"; + public static final String string_0806 = "TestString_0806"; + public static final String string_0807 = "TestString_0807"; + public static final String string_0808 = "TestString_0808"; + public static final String string_0809 = "TestString_0809"; + public static final String string_0810 = "TestString_0810"; + public static final String string_0811 = "TestString_0811"; + public static final String string_0812 = "TestString_0812"; + public static final String string_0813 = "TestString_0813"; + public static final String string_0814 = "TestString_0814"; + public static final String string_0815 = "TestString_0815"; + public static final String string_0816 = "TestString_0816"; + public static final String string_0817 = "TestString_0817"; + public static final String string_0818 = "TestString_0818"; + public static final String string_0819 = "TestString_0819"; + public static final String string_0820 = "TestString_0820"; + public static final String string_0821 = "TestString_0821"; + public static final String string_0822 = "TestString_0822"; + public static final String string_0823 = "TestString_0823"; + public static final String string_0824 = "TestString_0824"; + public static final String string_0825 = "TestString_0825"; + public static final String string_0826 = "TestString_0826"; + public static final String string_0827 = "TestString_0827"; + public static final String string_0828 = "TestString_0828"; + public static final String string_0829 = "TestString_0829"; + public static final String string_0830 = "TestString_0830"; + public static final String string_0831 = "TestString_0831"; + public static final String string_0832 = 
"TestString_0832"; + public static final String string_0833 = "TestString_0833"; + public static final String string_0834 = "TestString_0834"; + public static final String string_0835 = "TestString_0835"; + public static final String string_0836 = "TestString_0836"; + public static final String string_0837 = "TestString_0837"; + public static final String string_0838 = "TestString_0838"; + public static final String string_0839 = "TestString_0839"; + public static final String string_0840 = "TestString_0840"; + public static final String string_0841 = "TestString_0841"; + public static final String string_0842 = "TestString_0842"; + public static final String string_0843 = "TestString_0843"; + public static final String string_0844 = "TestString_0844"; + public static final String string_0845 = "TestString_0845"; + public static final String string_0846 = "TestString_0846"; + public static final String string_0847 = "TestString_0847"; + public static final String string_0848 = "TestString_0848"; + public static final String string_0849 = "TestString_0849"; + public static final String string_0850 = "TestString_0850"; + public static final String string_0851 = "TestString_0851"; + public static final String string_0852 = "TestString_0852"; + public static final String string_0853 = "TestString_0853"; + public static final String string_0854 = "TestString_0854"; + public static final String string_0855 = "TestString_0855"; + public static final String string_0856 = "TestString_0856"; + public static final String string_0857 = "TestString_0857"; + public static final String string_0858 = "TestString_0858"; + public static final String string_0859 = "TestString_0859"; + public static final String string_0860 = "TestString_0860"; + public static final String string_0861 = "TestString_0861"; + public static final String string_0862 = "TestString_0862"; + public static final String string_0863 = "TestString_0863"; + public static final String string_0864 = 
"TestString_0864"; + public static final String string_0865 = "TestString_0865"; + public static final String string_0866 = "TestString_0866"; + public static final String string_0867 = "TestString_0867"; + public static final String string_0868 = "TestString_0868"; + public static final String string_0869 = "TestString_0869"; + public static final String string_0870 = "TestString_0870"; + public static final String string_0871 = "TestString_0871"; + public static final String string_0872 = "TestString_0872"; + public static final String string_0873 = "TestString_0873"; + public static final String string_0874 = "TestString_0874"; + public static final String string_0875 = "TestString_0875"; + public static final String string_0876 = "TestString_0876"; + public static final String string_0877 = "TestString_0877"; + public static final String string_0878 = "TestString_0878"; + public static final String string_0879 = "TestString_0879"; + public static final String string_0880 = "TestString_0880"; + public static final String string_0881 = "TestString_0881"; + public static final String string_0882 = "TestString_0882"; + public static final String string_0883 = "TestString_0883"; + public static final String string_0884 = "TestString_0884"; + public static final String string_0885 = "TestString_0885"; + public static final String string_0886 = "TestString_0886"; + public static final String string_0887 = "TestString_0887"; + public static final String string_0888 = "TestString_0888"; + public static final String string_0889 = "TestString_0889"; + public static final String string_0890 = "TestString_0890"; + public static final String string_0891 = "TestString_0891"; + public static final String string_0892 = "TestString_0892"; + public static final String string_0893 = "TestString_0893"; + public static final String string_0894 = "TestString_0894"; + public static final String string_0895 = "TestString_0895"; + public static final String string_0896 = 
"TestString_0896"; + public static final String string_0897 = "TestString_0897"; + public static final String string_0898 = "TestString_0898"; + public static final String string_0899 = "TestString_0899"; + public static final String string_0900 = "TestString_0900"; + public static final String string_0901 = "TestString_0901"; + public static final String string_0902 = "TestString_0902"; + public static final String string_0903 = "TestString_0903"; + public static final String string_0904 = "TestString_0904"; + public static final String string_0905 = "TestString_0905"; + public static final String string_0906 = "TestString_0906"; + public static final String string_0907 = "TestString_0907"; + public static final String string_0908 = "TestString_0908"; + public static final String string_0909 = "TestString_0909"; + public static final String string_0910 = "TestString_0910"; + public static final String string_0911 = "TestString_0911"; + public static final String string_0912 = "TestString_0912"; + public static final String string_0913 = "TestString_0913"; + public static final String string_0914 = "TestString_0914"; + public static final String string_0915 = "TestString_0915"; + public static final String string_0916 = "TestString_0916"; + public static final String string_0917 = "TestString_0917"; + public static final String string_0918 = "TestString_0918"; + public static final String string_0919 = "TestString_0919"; + public static final String string_0920 = "TestString_0920"; + public static final String string_0921 = "TestString_0921"; + public static final String string_0922 = "TestString_0922"; + public static final String string_0923 = "TestString_0923"; + public static final String string_0924 = "TestString_0924"; + public static final String string_0925 = "TestString_0925"; + public static final String string_0926 = "TestString_0926"; + public static final String string_0927 = "TestString_0927"; + public static final String string_0928 = 
"TestString_0928"; + public static final String string_0929 = "TestString_0929"; + public static final String string_0930 = "TestString_0930"; + public static final String string_0931 = "TestString_0931"; + public static final String string_0932 = "TestString_0932"; + public static final String string_0933 = "TestString_0933"; + public static final String string_0934 = "TestString_0934"; + public static final String string_0935 = "TestString_0935"; + public static final String string_0936 = "TestString_0936"; + public static final String string_0937 = "TestString_0937"; + public static final String string_0938 = "TestString_0938"; + public static final String string_0939 = "TestString_0939"; + public static final String string_0940 = "TestString_0940"; + public static final String string_0941 = "TestString_0941"; + public static final String string_0942 = "TestString_0942"; + public static final String string_0943 = "TestString_0943"; + public static final String string_0944 = "TestString_0944"; + public static final String string_0945 = "TestString_0945"; + public static final String string_0946 = "TestString_0946"; + public static final String string_0947 = "TestString_0947"; + public static final String string_0948 = "TestString_0948"; + public static final String string_0949 = "TestString_0949"; + public static final String string_0950 = "TestString_0950"; + public static final String string_0951 = "TestString_0951"; + public static final String string_0952 = "TestString_0952"; + public static final String string_0953 = "TestString_0953"; + public static final String string_0954 = "TestString_0954"; + public static final String string_0955 = "TestString_0955"; + public static final String string_0956 = "TestString_0956"; + public static final String string_0957 = "TestString_0957"; + public static final String string_0958 = "TestString_0958"; + public static final String string_0959 = "TestString_0959"; + public static final String string_0960 = 
"TestString_0960"; + public static final String string_0961 = "TestString_0961"; + public static final String string_0962 = "TestString_0962"; + public static final String string_0963 = "TestString_0963"; + public static final String string_0964 = "TestString_0964"; + public static final String string_0965 = "TestString_0965"; + public static final String string_0966 = "TestString_0966"; + public static final String string_0967 = "TestString_0967"; + public static final String string_0968 = "TestString_0968"; + public static final String string_0969 = "TestString_0969"; + public static final String string_0970 = "TestString_0970"; + public static final String string_0971 = "TestString_0971"; + public static final String string_0972 = "TestString_0972"; + public static final String string_0973 = "TestString_0973"; + public static final String string_0974 = "TestString_0974"; + public static final String string_0975 = "TestString_0975"; + public static final String string_0976 = "TestString_0976"; + public static final String string_0977 = "TestString_0977"; + public static final String string_0978 = "TestString_0978"; + public static final String string_0979 = "TestString_0979"; + public static final String string_0980 = "TestString_0980"; + public static final String string_0981 = "TestString_0981"; + public static final String string_0982 = "TestString_0982"; + public static final String string_0983 = "TestString_0983"; + public static final String string_0984 = "TestString_0984"; + public static final String string_0985 = "TestString_0985"; + public static final String string_0986 = "TestString_0986"; + public static final String string_0987 = "TestString_0987"; + public static final String string_0988 = "TestString_0988"; + public static final String string_0989 = "TestString_0989"; + public static final String string_0990 = "TestString_0990"; + public static final String string_0991 = "TestString_0991"; + public static final String string_0992 = 
"TestString_0992"; + public static final String string_0993 = "TestString_0993"; + public static final String string_0994 = "TestString_0994"; + public static final String string_0995 = "TestString_0995"; + public static final String string_0996 = "TestString_0996"; + public static final String string_0997 = "TestString_0997"; + public static final String string_0998 = "TestString_0998"; + public static final String string_0999 = "TestString_0999"; + public static final String string_1000 = "TestString_1000"; + public static final String string_1001 = "TestString_1001"; + public static final String string_1002 = "TestString_1002"; + public static final String string_1003 = "TestString_1003"; + public static final String string_1004 = "TestString_1004"; + public static final String string_1005 = "TestString_1005"; + public static final String string_1006 = "TestString_1006"; + public static final String string_1007 = "TestString_1007"; + public static final String string_1008 = "TestString_1008"; + public static final String string_1009 = "TestString_1009"; + public static final String string_1010 = "TestString_1010"; + public static final String string_1011 = "TestString_1011"; + public static final String string_1012 = "TestString_1012"; + public static final String string_1013 = "TestString_1013"; + public static final String string_1014 = "TestString_1014"; + public static final String string_1015 = "TestString_1015"; + public static final String string_1016 = "TestString_1016"; + public static final String string_1017 = "TestString_1017"; + public static final String string_1018 = "TestString_1018"; + public static final String string_1019 = "TestString_1019"; + public static final String string_1020 = "TestString_1020"; + public static final String string_1021 = "TestString_1021"; + public static final String string_1022 = "TestString_1022"; + public static final String string_1023 = "TestString_1023"; + public static final String string_1024 = 
"TestString_1024"; + + public void timeConstStringsWithConflict(int count) { + for (int i = 0; i < count; ++i) { + $noinline$foo("TestString_0000"); + $noinline$foo("TestString_1024"); + } + } + + public void timeConstStringsWithoutConflict(int count) { + for (int i = 0; i < count; ++i) { + $noinline$foo("TestString_0001"); + $noinline$foo("TestString_1023"); + } + } + + static void $noinline$foo(String s) { + if (doThrow) { throw new Error(); } + } + + public static boolean doThrow = false; +} diff --git a/benchmark/jni-perf/info.txt b/benchmark/jni-perf/info.txt new file mode 100644 index 0000000..010b57b --- /dev/null +++ b/benchmark/jni-perf/info.txt @@ -0,0 +1 @@ +Tests for measuring performance of JNI state changes. diff --git a/benchmark/jni-perf/perf_jni.cc b/benchmark/jni-perf/perf_jni.cc new file mode 100644 index 0000000..06dded8 --- /dev/null +++ b/benchmark/jni-perf/perf_jni.cc @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "jni.h" +#include "scoped_thread_state_change-inl.h" +#include "thread.h" + +namespace art { + +namespace { + +extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfJniEmptyCall(JNIEnv*, jobject) {} + +extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOACall(JNIEnv* env, jobject) { + ScopedObjectAccess soa(env); +} + +extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOAUncheckedCall(JNIEnv*, jobject) { + ScopedObjectAccessUnchecked soa(Thread::Current()); +} + +} // namespace + +} // namespace art diff --git a/benchmark/jni-perf/src/JniPerfBenchmark.java b/benchmark/jni-perf/src/JniPerfBenchmark.java new file mode 100644 index 0000000..1e7cc2b --- /dev/null +++ b/benchmark/jni-perf/src/JniPerfBenchmark.java @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class JniPerfBenchmark { + private static final String MSG = "ABCDE"; + + native void perfJniEmptyCall(); + native void perfSOACall(); + native void perfSOAUncheckedCall(); + + public void timeFastJNI(int N) { + // TODO: This might be an intrinsic. 
+ for (long i = 0; i < N; i++) { + char c = MSG.charAt(2); + } + } + + public void timeEmptyCall(int N) { + for (long i = 0; i < N; i++) { + perfJniEmptyCall(); + } + } + + public void timeSOACall(int N) { + for (long i = 0; i < N; i++) { + perfSOACall(); + } + } + + public void timeSOAUncheckedCall(int N) { + for (long i = 0; i < N; i++) { + perfSOAUncheckedCall(); + } + } + + { + System.loadLibrary("artbenchmark"); + } +} diff --git a/benchmark/jni_loader.cc b/benchmark/jni_loader.cc new file mode 100644 index 0000000..2c9f86e --- /dev/null +++ b/benchmark/jni_loader.cc @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +extern void register_micro_native_methods(JNIEnv* env); + +jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/) { + JNIEnv* env; + if (vm->GetEnv(reinterpret_cast(&env), JNI_VERSION_1_6) != JNI_OK) { + return -1; + } + + // List of functions to call to register methods explicitly. + // Otherwise we use the regular JNI naming conventions to register implicitly. 
+ register_micro_native_methods(env); + + return JNI_VERSION_1_6; +} diff --git a/benchmark/jobject-benchmark/info.txt b/benchmark/jobject-benchmark/info.txt new file mode 100644 index 0000000..f2a256a --- /dev/null +++ b/benchmark/jobject-benchmark/info.txt @@ -0,0 +1,7 @@ +Benchmark for jobject functions + +Measures performance of: +Add/RemoveLocalRef +Add/RemoveGlobalRef +Add/RemoveWeakGlobalRef +Decoding local, weak, global, handle scope jobjects. diff --git a/benchmark/jobject-benchmark/jobject_benchmark.cc b/benchmark/jobject-benchmark/jobject_benchmark.cc new file mode 100644 index 0000000..2f38b78 --- /dev/null +++ b/benchmark/jobject-benchmark/jobject_benchmark.cc @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "jni.h" + +#include "jni/java_vm_ext.h" +#include "mirror/class-inl.h" +#include "scoped_thread_state_change-inl.h" + +namespace art { +namespace { + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveLocal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + ObjPtr obj = soa.Decode(jobj); + CHECK(obj != nullptr); + for (jint i = 0; i < reps; ++i) { + jobject ref = soa.Env()->AddLocalReference(obj); + soa.Env()->DeleteLocalRef(ref); + } +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeLocal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + ObjPtr obj = soa.Decode(jobj); + CHECK(obj != nullptr); + jobject ref = soa.Env()->AddLocalReference(obj); + for (jint i = 0; i < reps; ++i) { + CHECK_EQ(soa.Decode(ref), obj); + } + soa.Env()->DeleteLocalRef(ref); +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + ObjPtr obj = soa.Decode(jobj); + CHECK(obj != nullptr); + for (jint i = 0; i < reps; ++i) { + jobject ref = soa.Vm()->AddGlobalRef(soa.Self(), obj); + soa.Vm()->DeleteGlobalRef(soa.Self(), ref); + } +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + ObjPtr obj = soa.Decode(jobj); + CHECK(obj != nullptr); + jobject ref = soa.Vm()->AddGlobalRef(soa.Self(), obj); + for (jint i = 0; i < reps; ++i) { + CHECK_EQ(soa.Decode(ref), obj); + } + soa.Vm()->DeleteGlobalRef(soa.Self(), ref); +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveWeakGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + ObjPtr obj = soa.Decode(jobj); + CHECK(obj != nullptr); + for (jint i = 0; i < reps; ++i) { + jobject ref = soa.Vm()->AddWeakGlobalRef(soa.Self(), obj); + soa.Vm()->DeleteWeakGlobalRef(soa.Self(), ref); + } +} + +extern "C" 
JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeWeakGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + ObjPtr obj = soa.Decode(jobj); + CHECK(obj != nullptr); + jobject ref = soa.Vm()->AddWeakGlobalRef(soa.Self(), obj); + for (jint i = 0; i < reps; ++i) { + CHECK_EQ(soa.Decode(ref), obj); + } + soa.Vm()->DeleteWeakGlobalRef(soa.Self(), ref); +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeHandleScopeRef( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + for (jint i = 0; i < reps; ++i) { + soa.Decode(jobj); + } +} + +} // namespace +} // namespace art diff --git a/benchmark/jobject-benchmark/src/JObjectBenchmark.java b/benchmark/jobject-benchmark/src/JObjectBenchmark.java new file mode 100644 index 0000000..90a53b3 --- /dev/null +++ b/benchmark/jobject-benchmark/src/JObjectBenchmark.java @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class JObjectBenchmark { + public JObjectBenchmark() { + // Make sure to link methods before benchmark starts. 
+ System.loadLibrary("artbenchmark"); + timeAddRemoveLocal(1); + timeDecodeLocal(1); + timeAddRemoveGlobal(1); + timeDecodeGlobal(1); + timeAddRemoveWeakGlobal(1); + timeDecodeWeakGlobal(1); + timeDecodeHandleScopeRef(1); + } + + public native void timeAddRemoveLocal(int reps); + public native void timeDecodeLocal(int reps); + public native void timeAddRemoveGlobal(int reps); + public native void timeDecodeGlobal(int reps); + public native void timeAddRemoveWeakGlobal(int reps); + public native void timeDecodeWeakGlobal(int reps); + public native void timeDecodeHandleScopeRef(int reps); +} diff --git a/benchmark/micro-native/micro_native.cc b/benchmark/micro-native/micro_native.cc new file mode 100644 index 0000000..dffbf3b --- /dev/null +++ b/benchmark/micro-native/micro_native.cc @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#ifndef NATIVE_METHOD +#define NATIVE_METHOD(className, functionName, signature) \ + { #functionName, signature, reinterpret_cast(className ## _ ## functionName) } +#endif +#define NELEM(x) (sizeof(x)/sizeof((x)[0])) + +#define GLUE4(a, b, c, d) a ## b ## c ## d +#define GLUE4_(a, b, c, d) GLUE4(a, b, c, d) + +#define CLASS_NAME "benchmarks/MicroNative/java/NativeMethods" +#define CLASS_INFIX benchmarks_MicroNative_java_NativeMethods + +#define NAME_NORMAL_JNI_METHOD(name) GLUE4_(Java_, CLASS_INFIX, _, name) +#define NAME_CRITICAL_JNI_METHOD(name) GLUE4_(JavaCritical_, CLASS_INFIX, _, name) + +#define DEFINE_NORMAL_JNI_METHOD(ret, name) extern "C" JNIEXPORT ret JNICALL GLUE4_(Java_, CLASS_INFIX, _, name) +#define DEFINE_CRITICAL_JNI_METHOD(ret, name) extern "C" JNIEXPORT ret JNICALL GLUE4_(JavaCritical_, CLASS_INFIX, _, name) + +static void NativeMethods_emptyJniStaticSynchronizedMethod0(JNIEnv*, jclass) { } +static void NativeMethods_emptyJniSynchronizedMethod0(JNIEnv*, jclass) { } + +static JNINativeMethod gMethods_NormalOnly[] = { + NATIVE_METHOD(NativeMethods, emptyJniStaticSynchronizedMethod0, "()V"), + NATIVE_METHOD(NativeMethods, emptyJniSynchronizedMethod0, "()V"), +}; + +static void NativeMethods_emptyJniMethod0(JNIEnv*, jobject) { } +static void NativeMethods_emptyJniMethod6(JNIEnv*, jobject, int, int, int, int, int, int) { } +static void NativeMethods_emptyJniMethod6L(JNIEnv*, jobject, jobject, jarray, jarray, jobject, + jarray, jarray) { } +static void NativeMethods_emptyJniStaticMethod6L(JNIEnv*, jclass, jobject, jarray, jarray, jobject, + jarray, jarray) { } + +static void NativeMethods_emptyJniStaticMethod0(JNIEnv*, jclass) { } +static void NativeMethods_emptyJniStaticMethod6(JNIEnv*, jclass, int, int, int, int, int, int) { } + +static JNINativeMethod gMethods[] = { + NATIVE_METHOD(NativeMethods, emptyJniMethod0, "()V"), + NATIVE_METHOD(NativeMethods, emptyJniMethod6, "(IIIIII)V"), + NATIVE_METHOD(NativeMethods, 
emptyJniMethod6L, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), + NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6L, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), + NATIVE_METHOD(NativeMethods, emptyJniStaticMethod0, "()V"), + NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6, "(IIIIII)V"), +}; + +static void NativeMethods_emptyJniMethod0_Fast(JNIEnv*, jobject) { } +static void NativeMethods_emptyJniMethod6_Fast(JNIEnv*, jobject, int, int, int, int, int, int) { } +static void NativeMethods_emptyJniMethod6L_Fast(JNIEnv*, jobject, jobject, jarray, jarray, jobject, + jarray, jarray) { } +static void NativeMethods_emptyJniStaticMethod6L_Fast(JNIEnv*, jclass, jobject, jarray, jarray, + jobject, jarray, jarray) { } + +static void NativeMethods_emptyJniStaticMethod0_Fast(JNIEnv*, jclass) { } +static void NativeMethods_emptyJniStaticMethod6_Fast(JNIEnv*, jclass, int, int, int, int, int, int) { } + +static JNINativeMethod gMethods_Fast[] = { + NATIVE_METHOD(NativeMethods, emptyJniMethod0_Fast, "()V"), + NATIVE_METHOD(NativeMethods, emptyJniMethod6_Fast, "(IIIIII)V"), + NATIVE_METHOD(NativeMethods, emptyJniMethod6L_Fast, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), + NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6L_Fast, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), + NATIVE_METHOD(NativeMethods, emptyJniStaticMethod0_Fast, "()V"), + NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6_Fast, "(IIIIII)V"), +}; + +// Have both a Java_ and a JavaCritical_ version of the same empty method. +// The runtime automatically selects the right one when doing a dlsym-based native lookup. 
+DEFINE_NORMAL_JNI_METHOD(void, emptyJniStaticMethod0_1Critical)(JNIEnv*, jclass) { } +DEFINE_CRITICAL_JNI_METHOD(void, emptyJniStaticMethod0_1Critical)() { } +DEFINE_NORMAL_JNI_METHOD(void, emptyJniStaticMethod6_1Critical)(JNIEnv*, jclass, int, int, int, int, int, int) { } +DEFINE_CRITICAL_JNI_METHOD(void, emptyJniStaticMethod6_1Critical)(int, int, int, int, int, int) { } + +static JNINativeMethod gMethods_Critical[] = { + // Don't use NATIVE_METHOD because the name is mangled differently. + { "emptyJniStaticMethod0_Critical", "()V", + reinterpret_cast(NAME_CRITICAL_JNI_METHOD(emptyJniStaticMethod0_1Critical)) }, + { "emptyJniStaticMethod6_Critical", "(IIIIII)V", + reinterpret_cast(NAME_CRITICAL_JNI_METHOD(emptyJniStaticMethod6_1Critical)) } +}; + +void jniRegisterNativeMethods(JNIEnv* env, + const char* className, + const JNINativeMethod* methods, + int numMethods) { + jclass c = env->FindClass(className); + if (c == nullptr) { + char* tmp; + const char* msg; + if (asprintf(&tmp, + "Native registration unable to find class '%s'; aborting...", + className) == -1) { + // Allocation failed, print default warning. + msg = "Native registration unable to find class; aborting..."; + } else { + msg = tmp; + } + env->FatalError(msg); + } + + if (env->RegisterNatives(c, methods, numMethods) < 0) { + char* tmp; + const char* msg; + if (asprintf(&tmp, "RegisterNatives failed for '%s'; aborting...", className) == -1) { + // Allocation failed, print default warning. 
+ msg = "RegisterNatives failed; aborting..."; + } else { + msg = tmp; + } + env->FatalError(msg); + } +} + +void register_micro_native_methods(JNIEnv* env) { + jniRegisterNativeMethods(env, CLASS_NAME, gMethods_NormalOnly, NELEM(gMethods_NormalOnly)); + jniRegisterNativeMethods(env, CLASS_NAME, gMethods, NELEM(gMethods)); + jniRegisterNativeMethods(env, CLASS_NAME, gMethods_Fast, NELEM(gMethods_Fast)); + + if (env->FindClass("dalvik/annotation/optimization/CriticalNative") != nullptr) { + // Only register them explicitly if the annotation is present. + jniRegisterNativeMethods(env, CLASS_NAME, gMethods_Critical, NELEM(gMethods_Critical)); + } else { + if (env->ExceptionCheck()) { + // It will throw NoClassDefFoundError + env->ExceptionClear(); + } + } + // else let them be registered implicitly. +} diff --git a/benchmark/scoped-primitive-array/info.txt b/benchmark/scoped-primitive-array/info.txt new file mode 100644 index 0000000..93abb7c --- /dev/null +++ b/benchmark/scoped-primitive-array/info.txt @@ -0,0 +1 @@ +Tests for measuring performance of ScopedPrimitiveArray. diff --git a/benchmark/scoped-primitive-array/scoped_primitive_array.cc b/benchmark/scoped-primitive-array/scoped_primitive_array.cc new file mode 100644 index 0000000..005cae4 --- /dev/null +++ b/benchmark/scoped-primitive-array/scoped_primitive_array.cc @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "jni.h" +#include "nativehelper/ScopedPrimitiveArray.h" + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureByteArray( + JNIEnv* env, jclass, int reps, jbyteArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedByteArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureShortArray( + JNIEnv* env, jclass, int reps, jshortArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedShortArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureIntArray( + JNIEnv* env, jclass, int reps, jintArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedIntArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureLongArray( + JNIEnv* env, jclass, int reps, jlongArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedLongArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} diff --git a/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java new file mode 100644 index 0000000..0ad9c36 --- /dev/null +++ b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class ScopedPrimitiveArrayBenchmark { + // Measure adds the first and last element of the array by using ScopedPrimitiveArray. + static native long measureByteArray(int reps, byte[] arr); + static native long measureShortArray(int reps, short[] arr); + static native long measureIntArray(int reps, int[] arr); + static native long measureLongArray(int reps, long[] arr); + + static final int smallLength = 16; + static final int mediumLength = 256; + static final int largeLength = 8096; + static byte[] smallBytes = new byte[smallLength]; + static byte[] mediumBytes = new byte[mediumLength]; + static byte[] largeBytes = new byte[largeLength]; + static short[] smallShorts = new short[smallLength]; + static short[] mediumShorts = new short[mediumLength]; + static short[] largeShorts = new short[largeLength]; + static int[] smallInts = new int[smallLength]; + static int[] mediumInts = new int[mediumLength]; + static int[] largeInts = new int[largeLength]; + static long[] smallLongs = new long[smallLength]; + static long[] mediumLongs = new long[mediumLength]; + static long[] largeLongs = new long[largeLength]; + + public void timeSmallBytes(int reps) { + measureByteArray(reps, smallBytes); + } + + public void timeMediumBytes(int reps) { + measureByteArray(reps, mediumBytes); + } + + public void timeLargeBytes(int reps) { + measureByteArray(reps, largeBytes); + } + + public void timeSmallShorts(int reps) { + measureShortArray(reps, smallShorts); + } + + public void timeMediumShorts(int reps) { + measureShortArray(reps, mediumShorts); + } + + 
public void timeLargeShorts(int reps) { + measureShortArray(reps, largeShorts); + } + + public void timeSmallInts(int reps) { + measureIntArray(reps, smallInts); + } + + public void timeMediumInts(int reps) { + measureIntArray(reps, mediumInts); + } + + public void timeLargeInts(int reps) { + measureIntArray(reps, largeInts); + } + + public void timeSmallLongs(int reps) { + measureLongArray(reps, smallLongs); + } + + public void timeMediumLongs(int reps) { + measureLongArray(reps, mediumLongs); + } + + public void timeLargeLongs(int reps) { + measureLongArray(reps, largeLongs); + } + + { + System.loadLibrary("artbenchmark"); + } +} diff --git a/benchmark/string-indexof/info.txt b/benchmark/string-indexof/info.txt new file mode 100644 index 0000000..cc04217 --- /dev/null +++ b/benchmark/string-indexof/info.txt @@ -0,0 +1 @@ +Benchmarks for repeating String.indexOf() instructions in a loop. diff --git a/benchmark/string-indexof/src/StringIndexOfBenchmark.java b/benchmark/string-indexof/src/StringIndexOfBenchmark.java new file mode 100644 index 0000000..481a27a --- /dev/null +++ b/benchmark/string-indexof/src/StringIndexOfBenchmark.java @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class StringIndexOfBenchmark { + public static final String string36 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; // length = 36 + + public void timeIndexOf0(int count) { + final char c = '0'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOf1(int count) { + final char c = '1'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOf2(int count) { + final char c = '2'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOf3(int count) { + final char c = '3'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOf4(int count) { + final char c = '4'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOf7(int count) { + final char c = '7'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOf8(int count) { + final char c = '8'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOfF(int count) { + final char c = 'F'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOfG(int count) { + final char c = 'G'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOfV(int count) { + final char c = 'V'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOfW(int count) { + final char c = 'W'; + String s = string36; + for (int i = 0; i < count; ++i) { + $noinline$indexOf(s, c); + } + } + + public void timeIndexOf_(int count) { + final char c = '_'; + String s = string36; + for (int i = 0; i < count; ++i) 
{ + $noinline$indexOf(s, c); + } + } + + static int $noinline$indexOf(String s, char c) { + if (doThrow) { throw new Error(); } + return s.indexOf(c); + } + + public static boolean doThrow = false; +} diff --git a/benchmark/stringbuilder-append/info.txt b/benchmark/stringbuilder-append/info.txt new file mode 100644 index 0000000..ae58812 --- /dev/null +++ b/benchmark/stringbuilder-append/info.txt @@ -0,0 +1 @@ +Benchmarks for the StringBuilder append pattern. diff --git a/benchmark/stringbuilder-append/src/StringBuilderAppendBenchmark.java b/benchmark/stringbuilder-append/src/StringBuilderAppendBenchmark.java new file mode 100644 index 0000000..1550e81 --- /dev/null +++ b/benchmark/stringbuilder-append/src/StringBuilderAppendBenchmark.java @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class StringBuilderAppendBenchmark { + public static String string1 = "s1"; + public static String string2 = "s2"; + public static String longString1 = "This is a long string 1"; + public static String longString2 = "This is a long string 2"; + public static int int1 = 42; + + public void timeAppendStrings(int count) { + String s1 = string1; + String s2 = string2; + int sum = 0; + for (int i = 0; i < count; ++i) { + String result = s1 + s2; + sum += result.length(); // Make sure the append is not optimized away. 
+ } + if (sum != count * (s1.length() + s2.length())) { + throw new AssertionError(); + } + } + + public void timeAppendLongStrings(int count) { + String s1 = longString1; + String s2 = longString2; + int sum = 0; + for (int i = 0; i < count; ++i) { + String result = s1 + s2; + sum += result.length(); // Make sure the append is not optimized away. + } + if (sum != count * (s1.length() + s2.length())) { + throw new AssertionError(); + } + } + + public void timeAppendStringAndInt(int count) { + String s1 = string1; + int i1 = int1; + int sum = 0; + for (int i = 0; i < count; ++i) { + String result = s1 + i1; + sum += result.length(); // Make sure the append is not optimized away. + } + if (sum != count * (s1.length() + Integer.toString(i1).length())) { + throw new AssertionError(); + } + } +} diff --git a/benchmark/type-check/info.txt b/benchmark/type-check/info.txt new file mode 100644 index 0000000..d14fb96 --- /dev/null +++ b/benchmark/type-check/info.txt @@ -0,0 +1 @@ +Benchmarks for repeating check-cast and instance-of instructions in a loop. diff --git a/benchmark/type-check/src/TypeCheckBenchmark.java b/benchmark/type-check/src/TypeCheckBenchmark.java new file mode 100644 index 0000000..96904d9 --- /dev/null +++ b/benchmark/type-check/src/TypeCheckBenchmark.java @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class TypeCheckBenchmark { + public void timeCheckCastLevel1ToLevel1(int count) { + Object[] arr = arr1; + for (int i = 0; i < count; ++i) { + Level1 l1 = (Level1) arr[i & 1023]; + } + } + + public void timeCheckCastLevel2ToLevel1(int count) { + Object[] arr = arr2; + for (int i = 0; i < count; ++i) { + Level1 l1 = (Level1) arr[i & 1023]; + } + } + + public void timeCheckCastLevel3ToLevel1(int count) { + Object[] arr = arr3; + for (int i = 0; i < count; ++i) { + Level1 l1 = (Level1) arr[i & 1023]; + } + } + + public void timeCheckCastLevel9ToLevel1(int count) { + Object[] arr = arr9; + for (int i = 0; i < count; ++i) { + Level1 l1 = (Level1) arr[i & 1023]; + } + } + + public void timeCheckCastLevel9ToLevel2(int count) { + Object[] arr = arr9; + for (int i = 0; i < count; ++i) { + Level2 l2 = (Level2) arr[i & 1023]; + } + } + + public void timeInstanceOfLevel1ToLevel1(int count) { + int sum = 0; + Object[] arr = arr1; + for (int i = 0; i < count; ++i) { + if (arr[i & 1023] instanceof Level1) { + ++sum; + } + } + result = sum; + } + + public void timeInstanceOfLevel2ToLevel1(int count) { + int sum = 0; + Object[] arr = arr2; + for (int i = 0; i < count; ++i) { + if (arr[i & 1023] instanceof Level1) { + ++sum; + } + } + result = sum; + } + + public void timeInstanceOfLevel3ToLevel1(int count) { + int sum = 0; + Object[] arr = arr3; + for (int i = 0; i < count; ++i) { + if (arr[i & 1023] instanceof Level1) { + ++sum; + } + } + result = sum; + } + + public void timeInstanceOfLevel9ToLevel1(int count) { + int sum = 0; + Object[] arr = arr9; + for (int i = 0; i < count; ++i) { + if (arr[i & 1023] instanceof Level1) { + ++sum; + } + } + result = sum; + } + + public void timeInstanceOfLevel9ToLevel2(int count) { + int sum = 0; + Object[] arr = arr9; + for (int i = 0; i < count; ++i) { + if (arr[i & 1023] instanceof Level2) { + ++sum; + } + } + result = sum; + } + + public static Object[] createArray(int level) { + try { + Class[] ls = { + null, + 
Level1.class, + Level2.class, + Level3.class, + Level4.class, + Level5.class, + Level6.class, + Level7.class, + Level8.class, + Level9.class, + }; + Class l = ls[level]; + Object[] array = new Object[1024]; + for (int i = 0; i < array.length; ++i) { + array[i] = l.newInstance(); + } + return array; + } catch (Exception unexpected) { + throw new Error("Initialization failure!"); + } + } + Object[] arr1 = createArray(1); + Object[] arr2 = createArray(2); + Object[] arr3 = createArray(3); + Object[] arr9 = createArray(9); + int result; +} + +class Level1 { } +class Level2 extends Level1 { } +class Level3 extends Level2 { } +class Level4 extends Level3 { } +class Level5 extends Level4 { } +class Level6 extends Level5 { } +class Level7 extends Level6 { } +class Level8 extends Level7 { } +class Level9 extends Level8 { } diff --git a/build/Android.bp b/build/Android.bp new file mode 100644 index 0000000..946e5a6 --- /dev/null +++ b/build/Android.bp @@ -0,0 +1,250 @@ +bootstrap_go_package { + name: "soong-art", + pkgPath: "android/soong/art", + deps: [ + "blueprint", + "blueprint-pathtools", + "blueprint-proptools", + "soong", + "soong-android", + "soong-apex", + "soong-cc", + ], + srcs: [ + "art.go", + "codegen.go", + "makevars.go", + ], + pluginFor: ["soong_build"], +} + +art_clang_tidy_errors = [ + "android-cloexec-dup", + "android-cloexec-open", + "bugprone-argument-comment", + "bugprone-lambda-function-name", + "bugprone-unused-raii", // Protect scoped things like MutexLock. 
+ "bugprone-unused-return-value", + "bugprone-virtual-near-miss", + "modernize-use-bool-literals", + "modernize-use-nullptr", + "modernize-use-using", + "performance-faster-string-find", + "performance-for-range-copy", + "performance-implicit-conversion-in-loop", + "performance-noexcept-move-constructor", + "performance-unnecessary-copy-initialization", + "performance-unnecessary-value-param", + "misc-unused-using-decls", +] + +art_clang_tidy_disabled = [ + "-google-default-arguments", + // We have local stores that are only used for debug checks. + "-clang-analyzer-deadcode.DeadStores", + // We are OK with some static globals and that they can, in theory, throw. + "-cert-err58-cpp", + // We have lots of C-style variadic functions, and are OK with them. JNI ensures + // that working around this warning would be extra-painful. + "-cert-dcl50-cpp", + // "Modernization" we don't agree with. + "-modernize-use-auto", + "-modernize-return-braced-init-list", + "-modernize-use-default-member-init", + "-modernize-pass-by-value", +] + +art_global_defaults { + // Additional flags are computed by art.go + + name: "art_defaults", + + // This is the default visibility for the //art package, but we repeat it + // here so that it gets merged with other visibility rules in modules + // extending these defaults. + visibility: ["//art:__subpackages__"], + + cflags: [ + // Base set of cflags used by all things ART. + "-fno-rtti", + "-ggdb3", + "-Wall", + "-Werror", + "-Wextra", + "-Wstrict-aliasing", + "-fstrict-aliasing", + "-Wunreachable-code", + "-Wredundant-decls", + "-Wshadow", + "-Wunused", + "-fvisibility=protected", + + // Warn about thread safety violations with clang. + "-Wthread-safety", + // TODO(b/144045034): turn on -Wthread-safety-negative + //"-Wthread-safety-negative", + + // Warn if switch fallthroughs aren't annotated. + "-Wimplicit-fallthrough", + + // Enable float equality warnings. + "-Wfloat-equal", + + // Enable warning of converting ints to void*. 
+ "-Wint-to-void-pointer-cast", + + // Enable warning of wrong unused annotations. + "-Wused-but-marked-unused", + + // Enable warning for deprecated language features. + "-Wdeprecated", + + // Enable warning for unreachable break & return. + "-Wunreachable-code-break", + "-Wunreachable-code-return", + + // Disable warning for use of offsetof on non-standard layout type. + // We use it to implement OFFSETOF_MEMBER - see macros.h. + "-Wno-invalid-offsetof", + + // Enable inconsistent-missing-override warning. This warning is disabled by default in + // Android. + "-Winconsistent-missing-override", + + // Enable thread annotations for std::mutex, etc. + "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS", + ], + + arch: { + x86: { + avx2: { + cflags: [ + "-mavx2", + "-mfma", + ], + }, + }, + x86_64: { + avx2: { + cflags: [ + "-mavx2", + "-mfma", + ], + }, + }, + }, + + target: { + android: { + cflags: [ + "-DART_TARGET", + + // To use oprofile_android --callgraph, uncomment this and recompile with + // mmma -j art + // "-fno-omit-frame-pointer", + // "-marm", + // "-mapcs", + ], + header_libs: [ + // We optimize Thread::Current() with a direct TLS access. This requires access to a + // platform specific Bionic header. + "bionic_libc_platform_headers", + ], + }, + linux: { + cflags: [ + // Enable missing-noreturn only on non-Mac. As lots of things are not implemented for + // Apple, it's a pain. + "-Wmissing-noreturn", + ], + }, + linux_bionic: { + header_libs: [ + // We optimize Thread::Current() with a direct TLS access. This requires access to a + // platform specific Bionic header. + "bionic_libc_platform_headers", + ], + strip: { + // Do not strip art libs when building for linux-bionic. + // Otherwise we can't get any symbols out of crashes. + none: true, + }, + }, + darwin: { + enabled: false, + }, + host: { + cflags: [ + // Bug: 15446488. We don't omit the frame pointer to work around + // clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress. 
+ "-fno-omit-frame-pointer", + // The build assumes that all our x86/x86_64 hosts (such as buildbots and developer + // desktops) support at least sse4.2/popcount. This firstly implies that the ART + // runtime binary itself may exploit these features. Secondly, this implies that + // the ART runtime passes these feature flags to dex2oat and JIT by calling the + // method InstructionSetFeatures::FromCppDefines(). Since invoking dex2oat directly + // does not pick up these flags, cross-compiling from a x86/x86_64 host to a + // x86/x86_64 target should not be affected. + "-msse4.2", + "-mpopcnt", + ], + }, + }, + + codegen: { + arm: { + cflags: ["-DART_ENABLE_CODEGEN_arm"], + }, + arm64: { + cflags: ["-DART_ENABLE_CODEGEN_arm64"], + }, + x86: { + cflags: ["-DART_ENABLE_CODEGEN_x86"], + }, + x86_64: { + cflags: ["-DART_ENABLE_CODEGEN_x86_64"], + }, + }, + + tidy_checks: art_clang_tidy_errors + art_clang_tidy_disabled, + tidy_checks_as_errors: art_clang_tidy_errors, + + tidy_flags: [ + // The static analyzer treats DCHECK as always enabled; we sometimes get + // false positives when we use DCHECKs with code that relies on NDEBUG. + "-extra-arg=-UNDEBUG", + // clang-tidy complains about functions like: + // void foo() { CHECK(kIsFooEnabled); /* do foo... */ } + // not being marked noreturn if kIsFooEnabled is false. + "-extra-arg=-Wno-missing-noreturn", + // Because tidy doesn't like our flow checks for compile-time configuration and thinks that + // the following code is dead (it is, but not for all configurations), disable unreachable + // code detection in Clang for tidy builds. It is still on for regular build steps, so we + // will still get the "real" errors. 
+ "-extra-arg=-Wno-unreachable-code", + ], +} + +art_debug_defaults { + name: "art_debug_defaults", + visibility: ["//art:__subpackages__"], + cflags: [ + "-DDYNAMIC_ANNOTATIONS_ENABLED=1", + "-DVIXL_DEBUG", + "-UNDEBUG", + ], + asflags: [ + "-UNDEBUG", + ], + target: { + // This has to be duplicated for android and host to make sure it + // comes after the -Wframe-larger-than warnings inserted by art.go + // target-specific properties + android: { + cflags: ["-Wno-frame-larger-than="], + }, + host: { + cflags: ["-Wno-frame-larger-than="], + }, + }, +} diff --git a/build/Android.common.mk b/build/Android.common.mk new file mode 100644 index 0000000..4d702e4 --- /dev/null +++ b/build/Android.common.mk @@ -0,0 +1,99 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ifndef ART_ANDROID_COMMON_MK +ART_ANDROID_COMMON_MK = true + +ART_TARGET_SUPPORTED_ARCH := arm arm64 x86 x86_64 +ART_HOST_SUPPORTED_ARCH := x86 x86_64 +ART_DEXPREOPT_BOOT_JAR_DIR := system/framework + +ifneq ($(HOST_OS),darwin) + ART_HOST_SUPPORTED_ARCH := x86 x86_64 +else + # Mac OS doesn't support low-4GB allocation in a 64-bit process. So we won't be able to create + # our heaps. 
+ ART_HOST_SUPPORTED_ARCH := x86 +endif + +ART_COVERAGE := false + +ifeq ($(ART_COVERAGE),true) +# https://gcc.gnu.org/onlinedocs/gcc/Cross-profiling.html +GCOV_PREFIX := /data/local/tmp/gcov +# GCOV_PREFIX_STRIP is an integer that defines how many levels should be +# stripped off the beginning of the path. We want the paths in $GCOV_PREFIX to +# be relative to $ANDROID_BUILD_TOP so we can just adb pull from the top and not +# have to worry about placing things ourselves. +GCOV_PREFIX_STRIP := $(shell echo $(ANDROID_BUILD_TOP) | grep -o / | wc -l) +GCOV_ENV := GCOV_PREFIX=$(GCOV_PREFIX) GCOV_PREFIX_STRIP=$(GCOV_PREFIX_STRIP) +else +GCOV_ENV := +endif + +ifeq (,$(filter $(TARGET_ARCH),$(ART_TARGET_SUPPORTED_ARCH))) +$(warning unsupported TARGET_ARCH=$(TARGET_ARCH)) +endif +ifeq (,$(filter $(HOST_ARCH),$(ART_HOST_SUPPORTED_ARCH))) +$(warning unsupported HOST_ARCH=$(HOST_ARCH)) +endif + +# Primary vs. secondary +2ND_TARGET_ARCH := $(TARGET_2ND_ARCH) +TARGET_INSTRUCTION_SET_FEATURES := $(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) +2ND_TARGET_INSTRUCTION_SET_FEATURES := $($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) +ifdef TARGET_2ND_ARCH + ifneq ($(filter %64,$(TARGET_ARCH)),) + ART_PHONY_TEST_TARGET_SUFFIX := 64 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := 32 + else + ART_PHONY_TEST_TARGET_SUFFIX := 32 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := + endif +else + ifneq ($(filter %64,$(TARGET_ARCH)),) + ART_PHONY_TEST_TARGET_SUFFIX := 64 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := + else + ART_PHONY_TEST_TARGET_SUFFIX := 32 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := + endif +endif + +ART_HOST_SHLIB_EXTENSION := $(HOST_SHLIB_SUFFIX) +ART_HOST_SHLIB_EXTENSION ?= .so +ifeq ($(HOST_PREFER_32_BIT),true) + ART_PHONY_TEST_HOST_SUFFIX := 32 + 2ND_ART_PHONY_TEST_HOST_SUFFIX := + ART_HOST_ARCH := x86 + 2ND_ART_HOST_ARCH := + 2ND_HOST_ARCH := + ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES) + 2ND_ART_HOST_OUT_SHARED_LIBRARIES := +else + ART_PHONY_TEST_HOST_SUFFIX 
:= 64 + 2ND_ART_PHONY_TEST_HOST_SUFFIX := 32 + ART_HOST_ARCH := x86_64 + 2ND_ART_HOST_ARCH := x86 + 2ND_HOST_ARCH := x86 + ART_HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT_SHARED_LIBRARIES) + 2ND_ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES) +endif + +ADB_EXECUTABLE := $(HOST_OUT_EXECUTABLES)/adb +ADB ?= $(ADB_EXECUTABLE) + +endif # ART_ANDROID_COMMON_MK diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk new file mode 100644 index 0000000..0896252 --- /dev/null +++ b/build/Android.common_build.mk @@ -0,0 +1,83 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ifndef ART_ANDROID_COMMON_BUILD_MK +ART_ANDROID_COMMON_BUILD_MK = true + +include art/build/Android.common.mk + +# These can be overridden via the environment or by editing to +# enable/disable certain build configuration. +# +# For example, to disable everything but the host debug build you use: +# +# (export ART_BUILD_TARGET_NDEBUG=false && export ART_BUILD_TARGET_DEBUG=false && export ART_BUILD_HOST_NDEBUG=false && ...) 
+# +# Beware that tests may use the non-debug build for performance, notable 055-enum-performance +# +ART_BUILD_TARGET_NDEBUG ?= true +ART_BUILD_TARGET_DEBUG ?= true +ART_BUILD_HOST_NDEBUG ?= true +ART_BUILD_HOST_DEBUG ?= true + +ifeq ($(ART_BUILD_TARGET_NDEBUG),false) +$(info Disabling ART_BUILD_TARGET_NDEBUG) +endif +ifeq ($(ART_BUILD_TARGET_DEBUG),false) +$(info Disabling ART_BUILD_TARGET_DEBUG) +endif +ifeq ($(ART_BUILD_HOST_NDEBUG),false) +$(info Disabling ART_BUILD_HOST_NDEBUG) +endif +ifeq ($(ART_BUILD_HOST_DEBUG),false) +$(info Disabling ART_BUILD_HOST_DEBUG) +endif + +# Enable the read barrier by default. +ART_USE_READ_BARRIER ?= true + +# Default compact dex level to none. +ifeq ($(ART_DEFAULT_COMPACT_DEX_LEVEL),) +ART_DEFAULT_COMPACT_DEX_LEVEL := none +endif + +ART_CPP_EXTENSION := .cc + +ifndef LIBART_IMG_HOST_BASE_ADDRESS + $(error LIBART_IMG_HOST_BASE_ADDRESS unset) +endif + +ifndef LIBART_IMG_TARGET_BASE_ADDRESS + $(error LIBART_IMG_TARGET_BASE_ADDRESS unset) +endif + +# Support for disabling certain builds. +ART_BUILD_TARGET := false +ART_BUILD_HOST := false +ifeq ($(ART_BUILD_TARGET_NDEBUG),true) + ART_BUILD_TARGET := true +endif +ifeq ($(ART_BUILD_TARGET_DEBUG),true) + ART_BUILD_TARGET := true +endif +ifeq ($(ART_BUILD_HOST_NDEBUG),true) + ART_BUILD_HOST := true +endif +ifeq ($(ART_BUILD_HOST_DEBUG),true) + ART_BUILD_HOST := true +endif + +endif # ART_ANDROID_COMMON_BUILD_MK diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk new file mode 100644 index 0000000..3403f2d --- /dev/null +++ b/build/Android.common_path.mk @@ -0,0 +1,162 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ifndef ART_ANDROID_COMMON_PATH_MK +ART_ANDROID_COMMON_PATH_MK := true + +include art/build/Android.common.mk +include art/build/Android.common_build.mk + +# Directory used for dalvik-cache on device. +ART_TARGET_DALVIK_CACHE_DIR := /data/dalvik-cache + +# Directory used for gtests on device. +# $(TARGET_OUT_DATA_NATIVE_TESTS) will evaluate to the nativetest directory in the target part on +# the host, so we can strip everything but the directory to find out whether it is "nativetest" or +# "nativetest64." +ART_TARGET_NATIVETEST_DIR := /data/$(notdir $(TARGET_OUT_DATA_NATIVE_TESTS))/art + +ART_TARGET_NATIVETEST_OUT := $(TARGET_OUT_DATA_NATIVE_TESTS)/art + +# Directory used for oat tests on device. +ART_TARGET_TEST_DIR := /data/art-test +ART_TARGET_TEST_OUT := $(TARGET_OUT_DATA)/art-test + +# core.oat location on the device. +TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$(DEX2OAT_TARGET_ARCH)/core.oat +ifdef TARGET_2ND_ARCH +2ND_TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core.oat +endif + +CORE_OAT_SUFFIX := .oat + +# core.oat locations under the out directory. 
+HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core +ifneq ($(HOST_PREFER_32_BIT),true) +2ND_HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core +endif +HOST_CORE_OAT_OUTS := +TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core +ifdef TARGET_2ND_ARCH +2ND_TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core +endif +TARGET_CORE_OAT_OUTS := + +CORE_IMG_SUFFIX := .art + +# core.art locations under the out directory. +HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core +ifneq ($(HOST_PREFER_32_BIT),true) +2ND_HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core +endif +HOST_CORE_IMG_OUTS := +TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core +ifdef TARGET_2ND_ARCH +2ND_TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core +endif +TARGET_CORE_IMG_OUTS := + +# Oat location of core.art. +HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art +TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art + +# Modules to compile for core.art. 
+CORE_IMG_JARS := core-oj core-libart core-icu4j okhttp bouncycastle apache-xml +HOST_CORE_IMG_JARS := $(addsuffix -hostdex,$(CORE_IMG_JARS)) +TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS)) +HOST_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_IMG_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar) +ifeq ($(ART_TEST_ANDROID_ROOT),) +TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),/$(ART_DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar) +else +TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(ART_TEST_ANDROID_ROOT)/$(jar).jar) +endif +HOST_CORE_IMG_DEX_FILES := $(foreach jar,$(HOST_CORE_IMG_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar) +TARGET_CORE_IMG_DEX_FILES := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar) + +# Jar files for the boot class path for testing. Must start with CORE_IMG_JARS. +TEST_CORE_JARS := $(CORE_IMG_JARS) conscrypt +HOST_TEST_CORE_JARS := $(addsuffix -hostdex,$(TEST_CORE_JARS)) +TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS)) +HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_TEST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar) +ifeq ($(ART_TEST_ANDROID_ROOT),) +TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),/$(ART_DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar) +else +TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar) +endif +HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_TEST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar) +TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar) + +ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_TEST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar) +ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar) 
+ +ART_CORE_SHARED_LIBRARIES := libicu_jni libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti +ART_CORE_SHARED_DEBUG_LIBRARIES := libopenjdkd libopenjdkjvmd libopenjdkjvmtid +ART_HOST_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION)) +ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION)) +ifdef HOST_2ND_ARCH +ART_HOST_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so) +ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so) +endif + +# Both the primary and the secondary arches of the libs are built by depending +# on the module name. +ART_DEBUG_TARGET_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(lib).com.android.art.debug) +ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so) +ifdef TARGET_2ND_ARCH +ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so) +endif + +ART_CORE_DEBUGGABLE_EXECUTABLES := \ + dex2oat \ + dexoptanalyzer \ + imgdiag \ + oatdump \ + profman \ + +ART_CORE_EXECUTABLES := \ + dalvikvm \ + dexlist \ + +# Depend on the -target or -host phony targets generated by the build system +# for each module +ART_TARGET_EXECUTABLES := +ifneq ($(ART_BUILD_TARGET_NDEBUG),false) +ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES) $(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)-target) +endif +ifneq ($(ART_BUILD_TARGET_DEBUG),false) +ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)d-target) +endif + +ART_HOST_EXECUTABLES := +ifneq ($(ART_BUILD_HOST_NDEBUG),false) 
+ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES) $(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)-host) +endif +ifneq ($(ART_BUILD_HOST_DEBUG),false) +ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)d-host) +endif + +# Release ART APEX, included by default in "user" builds. +RELEASE_ART_APEX := com.android.art.release +# Debug ART APEX, included by default in "userdebug" and "eng" +# builds and used in ART device benchmarking. +DEBUG_ART_APEX := com.android.art.debug +# Testing ART APEX, used in ART device testing. +TESTING_ART_APEX := com.android.art.testing + +# Conscrypt APEX +CONSCRYPT_APEX := com.android.conscrypt + +endif # ART_ANDROID_COMMON_PATH_MK diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk new file mode 100644 index 0000000..55b8ae2 --- /dev/null +++ b/build/Android.common_test.mk @@ -0,0 +1,154 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ifndef ART_ANDROID_COMMON_TEST_MK +ART_ANDROID_COMMON_TEST_MK = true + +include art/build/Android.common_path.mk + +# Directory used for temporary test files on the host. +# TMPDIR is always provided by the build system as $OUT_DIR-unique temporary directory. +ART_HOST_TEST_DIR := $(TMPDIR)/test-art + +# List of known broken tests that we won't attempt to execute. The test name must be the full +# rule name such as test-art-host-oat-optimizing-HelloWorld64. 
+ART_TEST_KNOWN_BROKEN := + +# List of known failing tests that when executed won't cause test execution to not finish. +# The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64. +ART_TEST_KNOWN_FAILING := + +# Keep going after encountering a test failure? +ART_TEST_KEEP_GOING ?= true + +# Do you want run-test to be quieter? run-tests will only show output if they fail. +ART_TEST_QUIET ?= true + +# Define the command run on test failure. $(1) is the name of the test. Executed by the shell. +# If the test was a top-level make target (e.g. `test-art-host-gtest-codegen_test64`), the command +# fails with exit status 1 (returned by the last `grep` statement below). +# Otherwise (e.g., if the test was run as a prerequisite of a compound test command, such as +# `test-art-host-gtest-codegen_test`), the command does not fail, as this would break rules running +# ART_TEST_PREREQ_FINISHED as one of their actions, which expects *all* prerequisites *not* to fail. +define ART_TEST_FAILED + ( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \ + (mkdir -p $(ART_HOST_TEST_DIR)/failed/ && touch $(ART_HOST_TEST_DIR)/failed/$(1) && \ + echo $(ART_TEST_KNOWN_FAILING) | grep -q $(1) \ + && (echo -e "$(1) \e[91mKNOWN FAILURE\e[0m") \ + || (echo -e "$(1) \e[91mFAILED\e[0m" >&2; echo $(MAKECMDGOALS) | grep -q -v $(1)))) +endef + +ifeq ($(ART_TEST_QUIET),true) + ART_TEST_ANNOUNCE_PASS := ( true ) + ART_TEST_ANNOUNCE_RUN := ( true ) + ART_TEST_ANNOUNCE_SKIP_FAILURE := ( true ) + ART_TEST_ANNOUNCE_SKIP_BROKEN := ( true ) +else + # Note the use of '=' and not ':=' is intentional since these are actually functions. 
+ ART_TEST_ANNOUNCE_PASS = ( echo -e "$(1) \e[92mPASSED\e[0m" ) + ART_TEST_ANNOUNCE_RUN = ( echo -e "$(1) \e[95mRUNNING\e[0m") + ART_TEST_ANNOUNCE_SKIP_FAILURE = ( echo -e "$(1) \e[93mSKIPPING DUE TO EARLIER FAILURE\e[0m" ) + ART_TEST_ANNOUNCE_SKIP_BROKEN = ( echo -e "$(1) \e[93mSKIPPING BROKEN TEST\e[0m" ) +endif + +# Define the command run on test success. $(1) is the name of the test. Executed by the shell. +# The command prints "PASSED" and then checks to see if this was a top-level make target (e.g. +# "mm test-art-host-oat-HelloWorld32"), if it was then it does nothing, otherwise it creates a file +# to be printed in the passing test summary. +define ART_TEST_PASSED + ( $(call ART_TEST_ANNOUNCE_PASS,$(1)) && \ + (echo $(MAKECMDGOALS) | grep -q $(1) || \ + (mkdir -p $(ART_HOST_TEST_DIR)/passed/ && touch $(ART_HOST_TEST_DIR)/passed/$(1)))) +endef + +# Define the command run on test success of multiple prerequisites. $(1) is the name of the test. +# When the test is a top-level make target then a summary of the ran tests is produced. Executed by +# the shell. +define ART_TEST_PREREQ_FINISHED + (echo -e "$(1) \e[32mCOMPLETE\e[0m" && \ + (echo $(MAKECMDGOALS) | grep -q -v $(1) || \ + (([ -d $(ART_HOST_TEST_DIR)/passed/ ] \ + && (echo -e "\e[92mPASSING TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/passed/) \ + || (echo -e "\e[91mNO TESTS PASSED\e[0m")) && \ + ([ -d $(ART_HOST_TEST_DIR)/skipped/ ] \ + && (echo -e "\e[93mSKIPPED TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/skipped/) \ + || (echo -e "\e[92mNO TESTS SKIPPED\e[0m")) && \ + ([ -d $(ART_HOST_TEST_DIR)/failed/ ] \ + && (echo -e "\e[91mFAILING TESTS\e[0m" >&2 && ls -1 $(ART_HOST_TEST_DIR)/failed/ >&2) \ + || (echo -e "\e[92mNO TESTS FAILED\e[0m")) \ + && ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] && rm -r $(ART_HOST_TEST_DIR) \ + || (rm -r $(ART_HOST_TEST_DIR) && false))))) +endef + +# Define the command executed by the shell ahead of running an art test. $(1) is the name of the +# test.
+define ART_TEST_SKIP + ((echo $(ART_TEST_KNOWN_BROKEN) | grep -q -v $(1) \ + && ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] || [ $(ART_TEST_KEEP_GOING) = true ])\ + && $(call ART_TEST_ANNOUNCE_RUN,$(1)) ) \ + || ((mkdir -p $(ART_HOST_TEST_DIR)/skipped/ && touch $(ART_HOST_TEST_DIR)/skipped/$(1) \ + && ([ -d $(ART_HOST_TEST_DIR)/failed/ ] \ + && $(call ART_TEST_ANNOUNCE_SKIP_FAILURE,$(1)) ) \ + || $(call ART_TEST_ANNOUNCE_SKIP_BROKEN,$(1)) ) && false)) +endef + +# Create a build rule to create the dex file for a test. +# $(1): module prefix, e.g. art-test-dex +# $(2): input test directory in art/test, e.g. HelloWorld +# $(3): target output module path (default module path is used on host) +# $(4): additional dependencies +# $(5): a make variable used to collate target dependencies, e.g ART_TEST_TARGET_OAT_HelloWorld_DEX +# $(6): a make variable used to collate host dependencies, e.g ART_TEST_HOST_OAT_HelloWorld_DEX +# +# If the input test directory contains a file called main.list, +# then a multi-dex file is created passing main.list as the --main-dex-list +# argument to dx. 
+define build-art-test-dex + ifeq ($(ART_BUILD_TARGET),true) + include $(CLEAR_VARS) + LOCAL_MODULE := $(1)-$(2) + LOCAL_SRC_FILES := $(call all-java-files-under, $(2)) + LOCAL_NO_STANDARD_LIBRARIES := true + LOCAL_DEX_PREOPT := false + LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4) + LOCAL_MODULE_TAGS := tests + LOCAL_JAVA_LIBRARIES := $(TARGET_TEST_CORE_JARS) + LOCAL_MODULE_PATH := $(3) + ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) + LOCAL_MIN_SDK_VERSION := 19 + LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex + endif + include $(BUILD_JAVA_LIBRARY) + $(5) := $$(LOCAL_INSTALLED_MODULE) + endif + ifeq ($(ART_BUILD_HOST),true) + include $(CLEAR_VARS) + LOCAL_MODULE := $(1)-$(2) + LOCAL_SRC_FILES := $(call all-java-files-under, $(2)) + LOCAL_NO_STANDARD_LIBRARIES := true + LOCAL_DEX_PREOPT := false + LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4) + LOCAL_JAVA_LIBRARIES := $(HOST_TEST_CORE_JARS) + ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) + LOCAL_MIN_SDK_VERSION := 19 + LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex + endif + include $(BUILD_HOST_DALVIK_JAVA_LIBRARY) + $(6) := $$(LOCAL_INSTALLED_MODULE) + endif +endef + +endif # ART_ANDROID_COMMON_TEST_MK diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk new file mode 100644 index 0000000..964a4c8 --- /dev/null +++ b/build/Android.cpplint.mk @@ -0,0 +1,72 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include art/build/Android.common_build.mk + +# Use upstream cpplint (toolpath from .repo/manifests/GLOBAL-PREUPLOAD.cfg). +ART_CPPLINT := external/google-styleguide/cpplint/cpplint.py + +# This file previously configured many cpplint settings. +# Everything that could be moved to CPPLINT.cfg has moved there. +# Please add new settings to CPPLINT.cfg over adding new flags in this file. + +ART_CPPLINT_FLAGS := +# No output when there are no errors. +ART_CPPLINT_QUIET := --quiet + +# 1) Get list of all .h & .cc files in the art directory. +# 2) Prepends 'art/' to each of them to make the full name. +ART_CPPLINT_SRC := $(addprefix $(LOCAL_PATH)/, $(call all-subdir-named-files,*.h) $(call all-subdir-named-files,*$(ART_CPP_EXTENSION))) + +# 1) Get list of all CPPLINT.cfg files in the art directory. +# 2) Prepends 'art/' to each of them to make the full name. +ART_CPPLINT_CFG := $(addprefix $(LOCAL_PATH)/, $(call all-subdir-named-files,CPPLINT.cfg)) + +# "mm cpplint-art" to verify we aren't regressing +# - files not touched since the last build are skipped (quite fast). +.PHONY: cpplint-art +cpplint-art: cpplint-art-phony + +# "mm cpplint-art-all" to manually execute cpplint.py on all files (very slow). +.PHONY: cpplint-art-all +cpplint-art-all: + $(ART_CPPLINT) $(ART_CPPLINT_FLAGS) $(ART_CPPLINT_SRC) + +OUT_CPPLINT := $(TARGET_COMMON_OUT_ROOT)/cpplint + +# Build up the list of all targets for linting the ART source files. 
+ART_CPPLINT_TARGETS := + +define declare-art-cpplint-target +art_cpplint_file := $(1) +art_cpplint_touch := $$(OUT_CPPLINT)/$$(subst /,__,$$(art_cpplint_file)) + +$$(art_cpplint_touch): $$(art_cpplint_file) $(ART_CPPLINT) $(ART_CPPLINT_CFG) art/build/Android.cpplint.mk + $(hide) $(ART_CPPLINT) $(ART_CPPLINT_QUIET) $(ART_CPPLINT_FLAGS) $$< + $(hide) mkdir -p $$(dir $$@) + $(hide) touch $$@ + +ART_CPPLINT_TARGETS += $$(art_cpplint_touch) +endef + +$(foreach file, $(ART_CPPLINT_SRC), $(eval $(call declare-art-cpplint-target,$(file)))) +#$(info $(call declare-art-cpplint-target,$(firstword $(ART_CPPLINT_SRC)))) + +include $(CLEAR_VARS) +LOCAL_MODULE := cpplint-art-phony +LOCAL_MODULE_TAGS := optional +LOCAL_ADDITIONAL_DEPENDENCIES := $(ART_CPPLINT_TARGETS) +include $(BUILD_PHONY_PACKAGE) diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk new file mode 100644 index 0000000..95d0f38 --- /dev/null +++ b/build/Android.gtest.mk @@ -0,0 +1,746 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# The path for which all the dex files are relative, not actually the current directory. +LOCAL_PATH := art/test + +include art/build/Android.common_test.mk +include art/build/Android.common_path.mk +include art/build/Android.common_build.mk + +# Subdirectories in art/test which contain dex files used as inputs for gtests. 
+GTEST_DEX_DIRECTORIES := \ + AbstractMethod \ + AllFields \ + DefaultMethods \ + DexToDexDecompiler \ + Dex2oatVdexTestDex \ + ErroneousA \ + ErroneousB \ + ErroneousInit \ + Extension1 \ + Extension2 \ + ForClassLoaderA \ + ForClassLoaderB \ + ForClassLoaderC \ + ForClassLoaderD \ + ExceptionHandle \ + GetMethodSignature \ + HiddenApi \ + HiddenApiSignatures \ + HiddenApiStubs \ + ImageLayoutA \ + ImageLayoutB \ + IMTA \ + IMTB \ + Instrumentation \ + Interfaces \ + Lookup \ + Main \ + ManyMethods \ + MethodTypes \ + MultiDex \ + MultiDexModifiedSecondary \ + MyClass \ + MyClassNatives \ + Nested \ + NonStaticLeafMethods \ + Packages \ + ProtoCompare \ + ProtoCompare2 \ + ProfileTestMultiDex \ + StaticLeafMethods \ + Statics \ + StaticsFromCode \ + StringLiterals \ + Transaction \ + XandY + +# Create build rules for each dex file recording the dependency. +$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-gtest,$(dir), \ + $(ART_TARGET_NATIVETEST_OUT),art/build/Android.gtest.mk,ART_TEST_TARGET_GTEST_$(dir)_DEX, \ + ART_TEST_HOST_GTEST_$(dir)_DEX))) + +# Create rules for MainStripped, a copy of Main with the classes.dex stripped +# for the oat file assistant tests. +ART_TEST_HOST_GTEST_MainStripped_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_MainStripped_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +# Create rules for MainUncompressedAligned, a copy of Main with the classes.dex uncompressed +# for the dex2oat tests. 
+ART_TEST_HOST_GTEST_MainUncompressedAligned_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))UncompressedAligned$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_MainUncompressedAligned_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))UncompressedAligned$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +# Create rules for EmptyUncompressed, a classes.dex that is empty and uncompressed +# for the dex2oat tests. +ART_TEST_HOST_GTEST_EmptyUncompressed_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))EmptyUncompressed$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_EmptyUncompressed_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))EmptyUncompressed$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +# Create rules for EmptyUncompressedAligned, a classes.dex that is empty, uncompressed, +# and 4 byte aligned for the dex2oat tests. +ART_TEST_HOST_GTEST_EmptyUncompressedAligned_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))EmptyUncompressedAligned$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_EmptyUncompressedAligned_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))EmptyUncompressedAligned$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +# Create rules for MultiDexUncompressedAligned, a copy of MultiDex with the classes.dex uncompressed +# for the OatFile tests.
+ART_TEST_HOST_GTEST_MultiDexUncompressedAligned_DEX := $(basename $(ART_TEST_HOST_GTEST_MultiDex_DEX))UncompressedAligned$(suffix $(ART_TEST_HOST_GTEST_MultiDex_DEX)) +ART_TEST_TARGET_GTEST_MultiDexUncompressedAligned_DEX := $(basename $(ART_TEST_TARGET_GTEST_MultiDex_DEX))UncompressedAligned$(suffix $(ART_TEST_TARGET_GTEST_MultiDex_DEX)) + +ifdef ART_TEST_HOST_GTEST_Main_DEX +$(ART_TEST_HOST_GTEST_MainStripped_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) + cp $< $@ + $(call dexpreopt-remove-classes.dex,$@) +endif + +ifdef ART_TEST_TARGET_GTEST_Main_DEX +$(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) + cp $< $@ + $(call dexpreopt-remove-classes.dex,$@) +endif + +ifdef ART_TEST_HOST_GTEST_Main_DEX +$(ART_TEST_HOST_GTEST_MainUncompressedAligned_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) $(ZIPALIGN) + cp $< $@ + $(call uncompress-dexs, $@) + $(call align-package, $@) +endif + +ifdef ART_TEST_TARGET_GTEST_Main_DEX +$(ART_TEST_TARGET_GTEST_MainUncompressedAligned_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) $(ZIPALIGN) + cp $< $@ + $(call uncompress-dexs, $@) + $(call align-package, $@) +endif + +ifdef ART_TEST_HOST_GTEST_Main_DEX +$(ART_TEST_HOST_GTEST_EmptyUncompressed_DEX): + touch $@_classes.dex + zip -j -qD -X -0 $@ $@_classes.dex + rm $@_classes.dex +endif + +ifdef ART_TEST_TARGET_GTEST_Main_DEX +$(ART_TEST_TARGET_GTEST_EmptyUncompressed_DEX): + touch $@_classes.dex + zip -j -qD -X -0 $@ $@_classes.dex + rm $@_classes.dex +endif + +ifdef ART_TEST_HOST_GTEST_Main_DEX +$(ART_TEST_HOST_GTEST_EmptyUncompressedAligned_DEX): $(ZIPALIGN) + touch $@_classes.dex + zip -j -0 $@_temp.zip $@_classes.dex + $(ZIPALIGN) -f 4 $@_temp.zip $@ + rm $@_classes.dex + rm $@_temp.zip +endif + +ifdef ART_TEST_TARGET_GTEST_Main_DEX +$(ART_TEST_TARGET_GTEST_EmptyUncompressedAligned_DEX): $(ZIPALIGN) + touch $@_classes.dex + zip -j -0 $@_temp.zip $@_classes.dex + $(ZIPALIGN) -f 4 $@_temp.zip $@ + rm $@_classes.dex + rm $@_temp.zip +endif + +ifdef 
ART_TEST_HOST_GTEST_MultiDex_DEX +$(ART_TEST_HOST_GTEST_MultiDexUncompressedAligned_DEX): $(ART_TEST_HOST_GTEST_MultiDex_DEX) $(ZIPALIGN) + cp $< $@ + $(call uncompress-dexs, $@) + $(call align-package, $@) +endif + +ifdef ART_TEST_TARGET_GTEST_MultiDex_DEX +$(ART_TEST_TARGET_GTEST_MultiDexUncompressedAligned_DEX): $(ART_TEST_TARGET_GTEST_MultiDex_DEX) $(ZIPALIGN) + cp $< $@ + $(call uncompress-dexs, $@) + $(call align-package, $@) +endif + +ART_TEST_GTEST_VerifierDeps_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDeps/*.smali)) +ART_TEST_GTEST_VerifierDepsMulti_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDepsMulti/*.smali)) +ART_TEST_HOST_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) +ART_TEST_HOST_GTEST_VerifierDepsMulti_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDepsMulti,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDepsMulti,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +$(ART_TEST_HOST_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali + $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) + +$(ART_TEST_TARGET_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali + $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) + +$(ART_TEST_HOST_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali + 
$(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) + +$(ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali + $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) + +ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifySoftFailDuringClinit/*.smali)) +ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifySoftFailDuringClinit,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifySoftFailDuringClinit,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +$(ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX): $(ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC) $(HOST_OUT_EXECUTABLES)/smali + $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) + +$(ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX): $(ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC) $(HOST_OUT_EXECUTABLES)/smali + $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) + +# Dex file dependencies for each gtest. 
+ART_GTEST_art_dex_file_loader_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex +ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary MyClassNatives Nested VerifierDeps VerifierDepsMulti + +ART_GTEST_atomic_dex_ref_map_test_DEX_DEPS := Interfaces +ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB ErroneousInit ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode +ART_GTEST_class_loader_context_test_DEX_DEPS := Main MultiDex MyClass ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD +ART_GTEST_class_table_test_DEX_DEPS := XandY +ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex +ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes +ART_GTEST_dexanalyze_test_DEX_DEPS := MultiDex +ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods +ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Dex2oatVdexTestDex ManyMethods Statics VerifierDeps MainUncompressedAligned EmptyUncompressed EmptyUncompressedAligned StringLiterals +ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps +ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle +ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi HiddenApiStubs +ART_GTEST_hidden_api_test_DEX_DEPS := HiddenApiSignatures Main MultiDex +ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods VerifySoftFailDuringClinit +ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB +ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation +ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives +ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods MyClassNatives +ART_GTEST_oat_file_assistant_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) +ART_GTEST_dexoptanalyzer_test_DEX_DEPS := 
$(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) +ART_GTEST_image_space_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Extension1 Extension2 +ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex MainUncompressedAligned MultiDexUncompressedAligned MainStripped Nested MultiDexModifiedSecondary +ART_GTEST_oat_test_DEX_DEPS := Main +ART_GTEST_oat_writer_test_DEX_DEPS := Main +# two_runtimes_test build off dex2oat_environment_test, which does sanity checks on the following dex files. +ART_GTEST_two_runtimes_test_DEX_DEPS := Main MainStripped Nested MultiDex MultiDexModifiedSecondary +ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY +ART_GTEST_proxy_test_DEX_DEPS := Interfaces +ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods +ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex +ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex +ART_GTEST_profile_boot_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex MultiDex +ART_GTEST_profiling_info_test_DEX_DEPS := ProfileTestMultiDex +ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY +ART_GTEST_stub_test_DEX_DEPS := AllFields +ART_GTEST_transaction_test_DEX_DEPS := Transaction +ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup +ART_GTEST_unstarted_runtime_test_DEX_DEPS := Nested +ART_GTEST_heap_verification_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY +ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps VerifierDepsMulti MultiDex +ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler +ART_GTEST_oatdump_app_test_DEX_DEPS := ProfileTestMultiDex +ART_GTEST_oatdump_test_DEX_DEPS := ProfileTestMultiDex +ART_GTEST_reg_type_test_DEX_DEPS := Interfaces + +# The elf writer test has dependencies on core.oat. 
+ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32) +ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_DEFAULT_64) $(TARGET_CORE_IMAGE_DEFAULT_32) + +# The two_runtimes_test test has dependencies on core.oat. +ART_GTEST_two_runtimes_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32) +ART_GTEST_two_runtimes_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_DEFAULT_64) $(TARGET_CORE_IMAGE_DEFAULT_32) + +# The transaction test has dependencies on core.oat. +ART_GTEST_transaction_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32) +ART_GTEST_transaction_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_DEFAULT_64) $(TARGET_CORE_IMAGE_DEFAULT_32) + +ART_GTEST_dex2oat_environment_tests_HOST_DEPS := \ + $(HOST_CORE_IMAGE_optimizing_64) \ + $(HOST_CORE_IMAGE_optimizing_32) \ + $(HOST_CORE_IMAGE_interpreter_64) \ + $(HOST_CORE_IMAGE_interpreter_32) +ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_optimizing_64) \ + $(TARGET_CORE_IMAGE_optimizing_32) \ + $(TARGET_CORE_IMAGE_interpreter_64) \ + $(TARGET_CORE_IMAGE_interpreter_32) + +ART_GTEST_oat_file_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \ + $(HOST_OUT_EXECUTABLES)/dex2oatd +ART_GTEST_oat_file_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \ + dex2oatd.com.android.art.debug + +ART_GTEST_oat_file_assistant_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) +ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) + +ART_GTEST_dexoptanalyzer_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \ + $(HOST_OUT_EXECUTABLES)/dexoptanalyzerd +ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \ + $(TESTING_ART_APEX) # For dexoptanalyzerd. 
+ +ART_GTEST_image_space_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) +ART_GTEST_image_space_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) + +ART_GTEST_dex2oat_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \ + $(HOST_OUT_EXECUTABLES)/dex2oatd +ART_GTEST_dex2oat_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \ + $(TESTING_ART_APEX) # For dex2oatd. + +ART_GTEST_dex2oat_image_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \ + $(HOST_OUT_EXECUTABLES)/dex2oatd +ART_GTEST_dex2oat_image_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \ + $(TESTING_ART_APEX) # For dex2oatd. + +ART_GTEST_module_exclusion_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_image_test_HOST_DEPS) +ART_GTEST_module_exclusion_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_image_test_TARGET_DEPS) + +# TODO: document why this is needed. +ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32) + +# The dexdiag test requires the dexdiag utility. +ART_GTEST_dexdiag_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/dexdiag +ART_GTEST_dexdiag_test_TARGET_DEPS := $(TESTING_ART_APEX) # For dexdiag. + +# The dexdump test requires an image and the dexdump utility. +# TODO: rename into dexdump when migration completes +ART_GTEST_dexdump_test_HOST_DEPS := \ + $(HOST_CORE_IMAGE_DEFAULT_64) \ + $(HOST_CORE_IMAGE_DEFAULT_32) \ + $(HOST_OUT_EXECUTABLES)/dexdump +ART_GTEST_dexdump_test_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_DEFAULT_64) \ + $(TARGET_CORE_IMAGE_DEFAULT_32) \ + dexdump.com.android.art.debug + +# The dexanalyze test requires an image and the dexanalyze utility. 
+ART_GTEST_dexanalyze_test_HOST_DEPS := \ + $(HOST_CORE_IMAGE_DEFAULT_64) \ + $(HOST_CORE_IMAGE_DEFAULT_32) \ + $(HOST_OUT_EXECUTABLES)/dexanalyze +ART_GTEST_dexanalyze_test_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_DEFAULT_64) \ + $(TARGET_CORE_IMAGE_DEFAULT_32) \ + dexanalyze.com.android.art.debug + +# The dexlayout test requires an image and the dexlayout utility. +# TODO: rename into dexdump when migration completes +ART_GTEST_dexlayout_test_HOST_DEPS := \ + $(HOST_CORE_IMAGE_DEFAULT_64) \ + $(HOST_CORE_IMAGE_DEFAULT_32) \ + $(HOST_OUT_EXECUTABLES)/dexlayoutd \ + $(HOST_OUT_EXECUTABLES)/dexdump +ART_GTEST_dexlayout_test_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_DEFAULT_64) \ + $(TARGET_CORE_IMAGE_DEFAULT_32) \ + dexlayoutd.com.android.art.debug \ + dexdump.com.android.art.debug + +# The dexlist test requires an image and the dexlist utility. +ART_GTEST_dexlist_test_HOST_DEPS := \ + $(HOST_CORE_IMAGE_DEFAULT_64) \ + $(HOST_CORE_IMAGE_DEFAULT_32) \ + $(HOST_OUT_EXECUTABLES)/dexlist +ART_GTEST_dexlist_test_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_DEFAULT_64) \ + $(TARGET_CORE_IMAGE_DEFAULT_32) \ + $(TESTING_ART_APEX) # For dexlist. + +# The imgdiag test has dependencies on core.oat since it needs to load it during the test. +# For the host, also add the installed tool (in the base size, that should suffice). For the +# target, just the module is fine, the sync will happen late enough. +ART_GTEST_imgdiag_test_HOST_DEPS := \ + $(HOST_CORE_IMAGE_DEFAULT_64) \ + $(HOST_CORE_IMAGE_DEFAULT_32) \ + $(HOST_OUT_EXECUTABLES)/imgdiagd +ART_GTEST_imgdiag_test_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_DEFAULT_64) \ + $(TARGET_CORE_IMAGE_DEFAULT_32) \ + imgdiagd.com.android.art.debug + +# Dex analyze test requires dexanalyze. +ART_GTEST_dexanalyze_test_HOST_DEPS := \ + $(HOST_OUT_EXECUTABLES)/dexanalyze +ART_GTEST_dexanalyze_test_TARGET_DEPS := \ + dexanalyze.com.android.art.debug + +# Oatdump test requires an image and oatfile to dump. 
+ART_GTEST_oatdump_test_HOST_DEPS := \ + $(HOST_CORE_IMAGE_DEFAULT_64) \ + $(HOST_CORE_IMAGE_DEFAULT_32) \ + $(HOST_OUT_EXECUTABLES)/oatdumpd \ + $(HOST_OUT_EXECUTABLES)/oatdumpds \ + $(HOST_OUT_EXECUTABLES)/dexdump \ + $(HOST_OUT_EXECUTABLES)/dex2oatd \ + $(HOST_OUT_EXECUTABLES)/dex2oatds +ART_GTEST_oatdump_test_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_DEFAULT_64) \ + $(TARGET_CORE_IMAGE_DEFAULT_32) \ + $(TESTING_ART_APEX) # For oatdumpd, dexdump, dex2oatd. +ART_GTEST_oatdump_image_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS) +ART_GTEST_oatdump_image_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS) +ART_GTEST_oatdump_app_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS) +ART_GTEST_oatdump_app_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS) + +# Profile assistant tests requires profman utility. +ART_GTEST_profile_assistant_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/profmand +ART_GTEST_profile_assistant_test_TARGET_DEPS := $(TESTING_ART_APEX) # For profmand. + +ART_GTEST_hiddenapi_test_HOST_DEPS := \ + $(HOST_CORE_IMAGE_DEFAULT_64) \ + $(HOST_CORE_IMAGE_DEFAULT_32) \ + $(HOST_OUT_EXECUTABLES)/hiddenapid + +# The path for which all the source files are relative, not actually the current directory. 
+LOCAL_PATH := art + +ART_TEST_MODULES := \ + art_cmdline_tests \ + art_compiler_host_tests \ + art_compiler_tests \ + art_dex2oat_tests \ + art_dexanalyze_tests \ + art_dexdiag_tests \ + art_dexdump_tests \ + art_dexlayout_tests \ + art_dexlist_tests \ + art_dexoptanalyzer_tests \ + art_hiddenapi_tests \ + art_imgdiag_tests \ + art_libartbase_tests \ + art_libartpalette_tests \ + art_libdexfile_external_tests \ + art_libdexfile_support_static_tests \ + art_libdexfile_support_tests \ + art_libdexfile_tests \ + art_libprofile_tests \ + art_oatdump_tests \ + art_profman_tests \ + art_runtime_compiler_tests \ + art_runtime_tests \ + art_sigchain_tests \ + +ART_TARGET_GTEST_NAMES := $(foreach tm,$(ART_TEST_MODULES),\ + $(foreach path,$(ART_TEST_LIST_device_$(TARGET_ARCH)_$(tm)),\ + $(notdir $(path))\ + )\ +) + +ART_HOST_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\ + $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_$(m))) + +ifneq ($(HOST_PREFER_32_BIT),true) +2ND_ART_HOST_GTEST_FILES += $(foreach m,$(ART_TEST_MODULES),\ + $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_$(m))) +endif + +# Variables holding collections of gtest prerequisites used to run a number of gtests.
+ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_GTEST_RULES := +ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES := +ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES := +ART_TEST_TARGET_GTEST_RULES := +ART_TEST_HOST_GTEST_DEPENDENCIES := +ART_TEST_TARGET_GTEST_DEPENDENCIES := + +ART_GTEST_TARGET_ANDROID_ROOT := '/system' +ifneq ($(ART_TEST_ANDROID_ROOT),) + ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT) +endif + +ART_GTEST_TARGET_ANDROID_I18N_ROOT := '/apex/com.android.i18n' +ifneq ($(ART_TEST_ANDROID_I18N_ROOT),) + ART_GTEST_TARGET_ANDROID_I18N_ROOT := $(ART_TEST_ANDROID_I18N_ROOT) +endif + +ART_GTEST_TARGET_ANDROID_ART_ROOT := '/apex/com.android.art' +ifneq ($(ART_TEST_ANDROID_ART_ROOT),) + ART_GTEST_TARGET_ANDROID_ART_ROOT := $(ART_TEST_ANDROID_ART_ROOT) +endif + +ART_GTEST_TARGET_ANDROID_TZDATA_ROOT := '/apex/com.android.tzdata' +ifneq ($(ART_TEST_ANDROID_TZDATA_ROOT),) + ART_GTEST_TARGET_ANDROID_TZDATA_ROOT := $(ART_TEST_ANDROID_TZDATA_ROOT) +endif + +# Define make rules for a host gtests. +# $(1): gtest name - the name of the test we're building such as leb128_test. +# $(2): path relative to $OUT to the test binary +# $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. +define define-art-gtest-rule-host + gtest_suffix := $(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX) + gtest_rule := test-art-host-gtest-$$(gtest_suffix) + gtest_output := $(call intermediates-dir-for,PACKAGING,art-host-gtest,HOST)/$$(gtest_suffix).xml + $$(call dist-for-goals,$$(gtest_rule),$$(gtest_output):gtest/$$(gtest_suffix)) + gtest_exe := $(2) + # Dependencies for all host gtests. 
+ gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \ + $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libicu_jni$$(ART_HOST_SHLIB_EXTENSION) \ + $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \ + $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \ + $$(gtest_exe) \ + $$(ART_GTEST_$(1)_HOST_DEPS) \ + $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) \ + $(HOST_OUT_EXECUTABLES)/signal_dumper + + ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps) + +.PHONY: $$(gtest_rule) +$$(gtest_rule): $$(gtest_output) + +# Re-run the tests, even if nothing changed. Until the build system has a dedicated "no cache" +# option, claim to write a file that is never produced. +$$(gtest_output): .KATI_IMPLICIT_OUTPUTS := $$(gtest_output)-nocache +$$(gtest_output): NAME := $$(gtest_rule) +ifeq (,$(SANITIZE_HOST)) +$$(gtest_output): $$(gtest_exe) $$(gtest_deps) + $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \ + timeout --foreground -k 120s 2400s $(HOST_OUT_EXECUTABLES)/signal_dumper -s 15 \ + $$< --gtest_output=xml:$$@ && \ + $$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME)) +else +# Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some +# build tools (e.g., ninja) intentionally leak. We want leak checks when we run our tests, so +# override ASAN_OPTIONS. b/37751350 +# Note 2: Under sanitization, also capture the output, and run it through the stack tool on failure +# (with the x86-64 ABI, as this allows symbolization of both x86 and x86-64). We don't do this in +# general as it loses all the color output, and we have our own symbolization step when not running +# under ASAN. 
+$$(gtest_output): $$(gtest_exe) $$(gtest_deps) + $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \ + ASAN_OPTIONS=detect_leaks=1 timeout --foreground -k 120s 3600s \ + $(HOST_OUT_EXECUTABLES)/signal_dumper -s 15 \ + $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \ + { $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \ + ( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \ + { echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \ + rm $$<.tmp.out ; $$(call ART_TEST_FAILED,$$(NAME))) +endif + + ART_TEST_HOST_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule) + ART_TEST_HOST_GTEST_RULES += $$(gtest_rule) + ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule) + + + # Clear locally defined variables. + gtest_deps := + gtest_exe := + gtest_output := + gtest_rule := + gtest_suffix := +endef # define-art-gtest-rule-host + +# Add the additional dependencies for the specified test +# $(1): test name +define add-art-gtest-dependencies + # Note that, both the primary and the secondary arches of the libs are built by depending + # on the module name. + gtest_deps := \ + $$(ART_GTEST_$(1)_TARGET_DEPS) \ + $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \ + + ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps) + + # Clear locally defined variables. + gtest_deps := +endef # add-art-gtest-dependencies + +# $(1): file name +# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. +define define-art-gtest-host + art_gtest_filename := $(1) + + include $$(CLEAR_VARS) + art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename))) + ifndef ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES + ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := + endif + $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),$$(art_gtest_filename),$(2))) + + # Clear locally defined variables. 
+ art_gtest_filename := + art_gtest_name := +endef # define-art-gtest-host + +# Define the rules to build and run gtests for both archs on host. +# $(1): test name +define define-art-gtest-host-both + art_gtest_name := $(1) + +.PHONY: test-art-host-gtest-$$(art_gtest_name) +test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES) + $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) + + # Clear now unused variables. + ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := + art_gtest_name := +endef # define-art-gtest-host-both + +ifeq ($(ART_BUILD_TARGET),true) + $(foreach name,$(ART_TARGET_GTEST_NAMES), $(eval $(call add-art-gtest-dependencies,$(name),))) + ART_TEST_TARGET_GTEST_DEPENDENCIES += \ + libicu_jni.com.android.art.debug \ + libjavacore.com.android.art.debug \ + libopenjdkd.com.android.art.debug \ + $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar) +endif +ifeq ($(ART_BUILD_HOST),true) + $(foreach file,$(ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host,$(file),))) + ifneq ($(HOST_PREFER_32_BIT),true) + $(foreach file,$(2ND_ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host,$(file),2ND_))) + endif + # Rules to run the different architecture versions of the gtest. 
+ $(foreach file,$(ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host-both,$$(notdir $$(basename $$(file)))))) +endif + +# Used outside the art project to get a list of the current tests +RUNTIME_TARGET_GTEST_MAKE_TARGETS := +art_target_gtest_files := $(foreach m,$(ART_TEST_MODULES),$(ART_TEST_LIST_device_$(TARGET_ARCH)_$(m))) +# If testdir == testfile, assume this is not a test_per_src module +$(foreach file,$(art_target_gtest_files),\ + $(eval testdir := $$(notdir $$(patsubst %/,%,$$(dir $$(file)))))\ + $(eval testfile := $$(notdir $$(basename $$(file))))\ + $(if $(call streq,$(testdir),$(testfile)),,\ + $(eval testfile := $(testdir)_$(testfile)))\ + $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $(testfile))\ +) +testdir := +testfile := +art_target_gtest_files := + +# Define all the combinations of host/target and suffix such as: +# test-art-host-gtest or test-art-host-gtest64 +# $(1): host or target +# $(2): HOST or TARGET +# $(3): undefined, 32 or 64 +define define-test-art-gtest-combination + ifeq ($(1),host) + ifneq ($(2),HOST) + $$(error argument mismatch $(1) and ($2)) + endif + else + ifneq ($(1),target) + $$(error found $(1) expected host or target) + endif + ifneq ($(2),TARGET) + $$(error argument mismatch $(1) and ($2)) + endif + endif + + rule_name := test-art-$(1)-gtest$(3) + dependencies := $$(ART_TEST_$(2)_GTEST$(3)_RULES) + +.PHONY: $$(rule_name) +$$(rule_name): $$(dependencies) d8 + $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) + + # Clear locally defined variables. 
+ rule_name := + dependencies := +endef # define-test-art-gtest-combination + +$(eval $(call define-test-art-gtest-combination,target,TARGET,)) +$(eval $(call define-test-art-gtest-combination,target,TARGET,$(ART_PHONY_TEST_TARGET_SUFFIX))) +ifdef 2ND_ART_PHONY_TEST_TARGET_SUFFIX +$(eval $(call define-test-art-gtest-combination,target,TARGET,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))) +endif +$(eval $(call define-test-art-gtest-combination,host,HOST,)) +$(eval $(call define-test-art-gtest-combination,host,HOST,$(ART_PHONY_TEST_HOST_SUFFIX))) +ifneq ($(HOST_PREFER_32_BIT),true) +$(eval $(call define-test-art-gtest-combination,host,HOST,$(2ND_ART_PHONY_TEST_HOST_SUFFIX))) +endif + +# Clear locally defined variables. +define-art-gtest-rule-target := +define-art-gtest-rule-host := +define-art-gtest := +define-test-art-gtest-combination := +RUNTIME_GTEST_COMMON_SRC_FILES := +COMPILER_GTEST_COMMON_SRC_FILES := +RUNTIME_GTEST_TARGET_SRC_FILES := +RUNTIME_GTEST_HOST_SRC_FILES := +COMPILER_GTEST_TARGET_SRC_FILES := +COMPILER_GTEST_HOST_SRC_FILES := +ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_GTEST_RULES := +ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES := +ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES := +ART_TEST_TARGET_GTEST_RULES := +ART_GTEST_TARGET_ANDROID_ROOT := +ART_GTEST_TARGET_ANDROID_I18N_ROOT := +ART_GTEST_TARGET_ANDROID_ART_ROOT := +ART_GTEST_TARGET_ANDROID_TZDATA_ROOT := +ART_GTEST_class_linker_test_DEX_DEPS := +ART_GTEST_class_table_test_DEX_DEPS := +ART_GTEST_compiler_driver_test_DEX_DEPS := +ART_GTEST_dex_file_test_DEX_DEPS := +ART_GTEST_exception_test_DEX_DEPS := +ART_GTEST_elf_writer_test_HOST_DEPS := +ART_GTEST_elf_writer_test_TARGET_DEPS := +ART_GTEST_imtable_test_DEX_DEPS := +ART_GTEST_jni_compiler_test_DEX_DEPS := +ART_GTEST_jni_internal_test_DEX_DEPS := +ART_GTEST_oat_file_assistant_test_DEX_DEPS := 
+ART_GTEST_oat_file_assistant_test_HOST_DEPS := +ART_GTEST_oat_file_assistant_test_TARGET_DEPS := +ART_GTEST_dexanalyze_test_DEX_DEPS := +ART_GTEST_dexoptanalyzer_test_DEX_DEPS := +ART_GTEST_dexoptanalyzer_test_HOST_DEPS := +ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := +ART_GTEST_image_space_test_DEX_DEPS := +ART_GTEST_image_space_test_HOST_DEPS := +ART_GTEST_image_space_test_TARGET_DEPS := +ART_GTEST_dex2oat_test_DEX_DEPS := +ART_GTEST_dex2oat_test_HOST_DEPS := +ART_GTEST_dex2oat_test_TARGET_DEPS := +ART_GTEST_dex2oat_image_test_DEX_DEPS := +ART_GTEST_dex2oat_image_test_HOST_DEPS := +ART_GTEST_dex2oat_image_test_TARGET_DEPS := +ART_GTEST_module_exclusion_test_HOST_DEPS := +ART_GTEST_module_exclusion_test_TARGET_DEPS := +ART_GTEST_object_test_DEX_DEPS := +ART_GTEST_proxy_test_DEX_DEPS := +ART_GTEST_reflection_test_DEX_DEPS := +ART_GTEST_stub_test_DEX_DEPS := +ART_GTEST_transaction_test_DEX_DEPS := +ART_GTEST_dex2oat_environment_tests_DEX_DEPS := +ART_GTEST_heap_verification_test_DEX_DEPS := +ART_GTEST_verifier_deps_test_DEX_DEPS := +$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=)) +$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=)) +ART_TEST_HOST_GTEST_MainStripped_DEX := +ART_TEST_TARGET_GTEST_MainStripped_DEX := +ART_TEST_HOST_GTEST_MainUncompressedAligned_DEX := +ART_TEST_TARGET_GTEST_MainUncompressedAligned_DEX := +ART_TEST_HOST_GTEST_EmptyUncompressed_DEX := +ART_TEST_TARGET_GTEST_EmptyUncompressed_DEX := +ART_TEST_GTEST_VerifierDeps_SRC := +ART_TEST_HOST_GTEST_VerifierDeps_DEX := +ART_TEST_TARGET_GTEST_VerifierDeps_DEX := +ART_TEST_GTEST_VerifySoftFailDuringClinit_SRC := +ART_TEST_HOST_GTEST_VerifySoftFailDuringClinit_DEX := +ART_TEST_TARGET_GTEST_VerifySoftFailDuringClinit_DEX := +GTEST_DEX_DIRECTORIES := +LOCAL_PATH := diff --git a/build/Android.oat.mk b/build/Android.oat.mk new file mode 100644 index 0000000..c6fe400 --- /dev/null +++ b/build/Android.oat.mk @@ -0,0 +1,271 @@ +# +# 
Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +######################################################################## +# Rules to build a smaller "core" image to support core libraries +# (that is, non-Android frameworks) testing on the host and target +# +# The main rules to build the default "boot" image are in +# build/core/dex_preopt_libart.mk + +include art/build/Android.common_build.mk + +LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := +ifeq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) + LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default +else + LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) +endif +LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := +ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) + LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default +else + LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) +endif + +# Use dex2oat debug version for better error reporting +# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks). +# $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds. 
+define create-core-oat-host-rules + core_compile_options := + core_image_name := + core_oat_name := + core_infix := + core_dex2oat_dependency := $(DEX2OAT) + + ifeq ($(1),optimizing) + core_compile_options += --compiler-backend=Optimizing + endif + ifeq ($(1),interpreter) + core_compile_options += --compiler-filter=quicken + core_infix := -interpreter + endif + ifeq ($(1),interp-ac) + core_compile_options += --compiler-filter=extract --runtime-arg -Xverify:softfail + core_infix := -interp-ac + endif + ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),) + #Technically this test is not precise, but hopefully good enough. + $$(error found $(1) expected interpreter, interp-ac, or optimizing) + endif + + core_image_location := $(HOST_OUT_JAVA_LIBRARIES)/core$$(core_infix)$(CORE_IMG_SUFFIX) + core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX) + core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX) + + # Using the bitness suffix makes it easier to add as a dependency for the run-test mk. + ifeq ($(2),) + HOST_CORE_IMAGE_$(1)_64 := $$(core_image_name) + else + HOST_CORE_IMAGE_$(1)_32 := $$(core_image_name) + endif + HOST_CORE_IMG_OUTS += $$(core_image_name) + HOST_CORE_OAT_OUTS += $$(core_oat_name) + +$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options) +$$(core_image_name): PRIVATE_CORE_IMAGE_LOCATION := $$(core_image_location) +$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name) +$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name) +# In addition to the primary core image containing HOST_CORE_IMG_DEX_FILES, +# also build a boot image extension for the remaining HOST_CORE_DEX_FILES. 
+$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency) + @echo "host dex2oat: $$@" + @mkdir -p $$(dir $$@) + $$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) \ + --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ + --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ + $$(addprefix --dex-file=,$$(HOST_CORE_IMG_DEX_FILES)) \ + $$(addprefix --dex-location=,$$(HOST_CORE_IMG_DEX_LOCATIONS)) \ + --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ + --oat-location=$$(PRIVATE_CORE_OAT_NAME) \ + --image=$$(PRIVATE_CORE_IMG_NAME) \ + --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) \ + --instruction-set=$$($(2)ART_HOST_ARCH) \ + $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \ + --host --android-root=$$(HOST_OUT) \ + --generate-debug-info --generate-build-id \ + --runtime-arg -XX:SlowDebug=true \ + --no-inline-from=core-oj-hostdex.jar \ + $$(PRIVATE_CORE_COMPILE_OPTIONS) && \ + ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) \ + --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ + --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ + --runtime-arg -Xbootclasspath:$$(subst $$(space),:,$$(strip \ + $$(HOST_CORE_DEX_FILES))) \ + --runtime-arg -Xbootclasspath-locations:$$(subst $$(space),:,$$(strip \ + $$(HOST_CORE_DEX_LOCATIONS))) \ + $$(addprefix --dex-file=, \ + $$(filter-out $$(HOST_CORE_IMG_DEX_FILES),$$(HOST_CORE_DEX_FILES))) \ + $$(addprefix --dex-location=, \ + $$(filter-out $$(HOST_CORE_IMG_DEX_LOCATIONS),$$(HOST_CORE_DEX_LOCATIONS))) \ + --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ + --oat-location=$$(PRIVATE_CORE_OAT_NAME) \ + --boot-image=$$(PRIVATE_CORE_IMAGE_LOCATION) \ + --image=$$(PRIVATE_CORE_IMG_NAME) \ + --instruction-set=$$($(2)ART_HOST_ARCH) \ + $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \ + --host --android-root=$$(HOST_OUT) \ + --generate-debug-info --generate-build-id \ + --runtime-arg -XX:SlowDebug=true \ + --no-inline-from=core-oj-hostdex.jar \ + $$(PRIVATE_CORE_COMPILE_OPTIONS) + +$$(core_oat_name): $$(core_image_name) + + # Clean up locally used variables. 
+ core_dex2oat_dependency := + core_compile_options := + core_image_name := + core_oat_name := + core_infix := +endef # create-core-oat-host-rules + +# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks). +define create-core-oat-host-rule-combination + $(call create-core-oat-host-rules,$(1),) + + ifneq ($(HOST_PREFER_32_BIT),true) + $(call create-core-oat-host-rules,$(1),2ND_) + endif +endef + +$(eval $(call create-core-oat-host-rule-combination,optimizing)) +$(eval $(call create-core-oat-host-rule-combination,interpreter)) +$(eval $(call create-core-oat-host-rule-combination,interp-ac)) + +.PHONY: test-art-host-dex2oat-host +test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS) + +# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks). +# $(2): 2ND_ or undefined +define create-core-oat-target-rules + core_compile_options := + core_image_name := + core_oat_name := + core_infix := + core_dex2oat_dependency := $(DEX2OAT) + + ifeq ($(1),optimizing) + core_compile_options += --compiler-backend=Optimizing + endif + ifeq ($(1),interpreter) + core_compile_options += --compiler-filter=quicken + core_infix := -interpreter + endif + ifeq ($(1),interp-ac) + core_compile_options += --compiler-filter=extract --runtime-arg -Xverify:softfail + core_infix := -interp-ac + endif + ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),) + # Technically this test is not precise, but hopefully good enough. + $$(error found $(1) expected interpreter, interp-ac, or optimizing) + endif + + core_image_location := $(ART_TARGET_TEST_OUT)/core$$(core_infix)$(CORE_IMG_SUFFIX) + core_image_name := $($(2)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX) + core_oat_name := $($(2)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX) + + # Using the bitness suffix makes it easier to add as a dependency for the run-test mk. 
+ ifeq ($(2),) + ifdef TARGET_2ND_ARCH + TARGET_CORE_IMAGE_$(1)_64 := $$(core_image_name) + else + TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name) + endif + else + TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name) + endif + TARGET_CORE_IMG_OUTS += $$(core_image_name) + TARGET_CORE_OAT_OUTS += $$(core_oat_name) + +$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options) +$$(core_image_name): PRIVATE_CORE_IMAGE_LOCATION := $$(core_image_location) +$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name) +$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name) +# In addition to the primary core image containing TARGET_CORE_IMG_DEX_FILES, +# also build a boot image extension for the remaining TARGET_CORE_DEX_FILES. +$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency) + @echo "target dex2oat: $$@" + @mkdir -p $$(dir $$@) + $$(hide) $$(DEX2OAT) \ + --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ + --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ + $$(addprefix --dex-file=,$$(TARGET_CORE_IMG_DEX_FILES)) \ + $$(addprefix --dex-location=,$$(TARGET_CORE_IMG_DEX_LOCATIONS)) \ + --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ + --oat-location=$$(PRIVATE_CORE_OAT_NAME) \ + --image=$$(PRIVATE_CORE_IMG_NAME) \ + --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) \ + --instruction-set=$$($(2)TARGET_ARCH) \ + --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \ + --instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \ + --android-root=$$(PRODUCT_OUT)/system \ + --generate-debug-info --generate-build-id \ + --runtime-arg -XX:SlowDebug=true \ + $$(PRIVATE_CORE_COMPILE_OPTIONS) && \ + $$(DEX2OAT) \ + --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ + --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ + --runtime-arg -Xbootclasspath:$$(subst $$(space),:,$$(strip \ + $$(TARGET_CORE_DEX_FILES))) \ + --runtime-arg -Xbootclasspath-locations:$$(subst $$(space),:,$$(strip \ + $$(TARGET_CORE_DEX_LOCATIONS))) \ + $$(addprefix --dex-file=, \ + 
$$(filter-out $$(TARGET_CORE_IMG_DEX_FILES),$$(TARGET_CORE_DEX_FILES))) \ + $$(addprefix --dex-location=, \ + $$(filter-out $$(TARGET_CORE_IMG_DEX_LOCATIONS),$$(TARGET_CORE_DEX_LOCATIONS))) \ + --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ + --oat-location=$$(PRIVATE_CORE_OAT_NAME) \ + --boot-image=$$(PRIVATE_CORE_IMAGE_LOCATION) \ + --image=$$(PRIVATE_CORE_IMG_NAME) \ + --instruction-set=$$($(2)TARGET_ARCH) \ + --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \ + --instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \ + --android-root=$$(PRODUCT_OUT)/system \ + --generate-debug-info --generate-build-id \ + --runtime-arg -XX:SlowDebug=true \ + $$(PRIVATE_CORE_COMPILE_OPTIONS) || \ + (rm $$(PRIVATE_CORE_OAT_NAME); exit 1) + +$$(core_oat_name): $$(core_image_name) + + # Clean up locally used variables. + core_dex2oat_dependency := + core_compile_options := + core_image_name := + core_oat_name := + core_infix := +endef # create-core-oat-target-rules + +# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks). +define create-core-oat-target-rule-combination + $(call create-core-oat-target-rules,$(1),) + + ifdef TARGET_2ND_ARCH + $(call create-core-oat-target-rules,$(1),2ND_) + endif +endef + +$(eval $(call create-core-oat-target-rule-combination,optimizing)) +$(eval $(call create-core-oat-target-rule-combination,interpreter)) +$(eval $(call create-core-oat-target-rule-combination,interp-ac)) + +# Define a default core image that can be used for things like gtests that +# need some image to run, but don't otherwise care which image is used. 
+HOST_CORE_IMAGE_DEFAULT_32 := $(HOST_CORE_IMAGE_optimizing_32) +HOST_CORE_IMAGE_DEFAULT_64 := $(HOST_CORE_IMAGE_optimizing_64) +TARGET_CORE_IMAGE_DEFAULT_32 := $(TARGET_CORE_IMAGE_optimizing_32) +TARGET_CORE_IMAGE_DEFAULT_64 := $(TARGET_CORE_IMAGE_optimizing_64) diff --git a/build/apex/Android.bp b/build/apex/Android.bp new file mode 100644 index 0000000..22510ef --- /dev/null +++ b/build/apex/Android.bp @@ -0,0 +1,459 @@ +// ART APEX module +// +// Contains both the Android Managed Runtime (ART) and the Android Core Library +// (Libcore). + +// Modules listed in LOCAL_REQUIRED_MODULES for module art-runtime in art/Android.mk. +// - Base requirements (binaries for which both 32- and 64-bit versions are built, if relevant). +art_runtime_base_binaries_both = [ + "dalvikvm", +] +art_runtime_base_binaries_both_on_device_first_on_host = [ + "dex2oat", +] +// - Base requirements (binaries for which a 32-bit version is preferred on device, but for which +// only the "first" (likely 64-bit) version is required on host). +art_runtime_base_binaries_prefer32_on_device_first_on_host = [ + "dexoptanalyzer", + "profman", +] +// - Base requirements (libraries). +// +// Note: ART on-device chroot-based testing and benchmarking is not yet using +// the ART APEX, meaning that copies of some of these libraries have to be +// installed in `/system` for the ART Buildbot set-up to work properly. This is +// done by the `standalone-apex-files` Make phony target, used by the ART +// Buildbot and Golem (see `art/Android.mk`). If you add libraries to this list, +// you may have to also add them to `PRIVATE_ART_APEX_DEPENDENCY_LIBS` in +// `art/Android.mk`. +// TODO(b/121117762): Remove this note when both the ART Buildbot and Golem use +// the ART APEX. +art_runtime_base_native_shared_libs = [ + // External API (having APEX stubs). 
+ "libdexfile_external", + "libnativebridge", + "libnativehelper", + "libnativeloader", + // libadbconnection is internal and loaded with dlopen(), but it cannot use + // "required" because of cyclic dependency (b/124505714). + "libadbconnection", + // TODO(b/124476339): Clean up the following libraries once "required" + // dependencies work with APEX libraries. + "libart", + "libart-compiler", + "libdt_fd_forward", + "libdt_socket", + "libjdwp", + "libnpt", + "libopenjdkjvm", + "libopenjdkjvmti", +] + +art_runtime_base_native_device_only_shared_libs = [ + "libperfetto_hprof", +] + +bionic_native_shared_libs = [ + // External API (having APEX stubs). + "libc", + "libm", + "libdl", +] + +bionic_binaries_both = [ + "linker", +] + +// - Debug variants (binaries for which a 32-bit version is preferred on device, but for which +// only the "first" (likely 64-bit) version is required on host). +art_runtime_debug_binaries_prefer32_on_device_first_on_host = [ + "dexoptanalyzerd", + "profmand", +] +art_runtime_debug_binaries_both_on_device_first_on_host = [ + "dex2oatd", +] + +// - Debug variants (libraries). +art_runtime_debug_native_shared_libs = [ + "libadbconnectiond", + "libartd", + "libartd-compiler", + "libdexfiled_external", + "libopenjdkjvmd", + "libopenjdkjvmtid", +] + +art_runtime_base_native_device_only_debug_shared_libs = [ + "libperfetto_hprofd", +] + +// Libraries needed to execute ART run-tests. +// TODO(b/124476339): When bug 124476339 is fixed, add these libraries as `runtime_libs` +// dependencies of `libartd-compiler`, and remove `art_runtime_run_test_libs`. +art_runtime_run_test_libs = [ + "libart-disassembler", + "libartd-disassembler", +] + +// Tools common to both device APEX and host APEX. Derived from art-tools in art/Android.mk. +art_tools_common_binaries = [ + "dexdump", + "dexlist", +] + +// Tools common to both device and host debug APEXes. 
+art_tools_debug_binaries = [ + "dexanalyze", + "dexdiag", + "dexlayout", + "dexlayoutd", +] + +art_tools_debug_binaries_both = [ + "imgdiag", + "imgdiagd", +] + +// Tools exclusively for the device APEX derived from art-tools in art/Android.mk. +art_tools_device_only_binaries = [ + // oatdump cannot link with host linux_bionic due to not using clang lld; + // TODO: Make it work with clang lld. + "oatdump", +] +// Same, but for only for debug packages. +art_tools_debug_device_only_binaries = [ + // oatdumpd cannot link with host linux_bionic due to not using clang lld; + // TODO: Make it work with clang lld. + "oatdumpd", +] + +// Tools exclusively for the host APEX derived from art-tools in art/Android.mk. +art_tools_host_only_binaries = [ + // FIXME: Does not work as-is, because `ahat` is defined in tools/ahat/Android.mk + // (same issue as for `libart_fake` above). + //"ahat", + "hprof-conv", +] + +// Core Java libraries. +libcore_java_libs = [ + "core-oj", + "core-libart", + "core-icu4j", + "okhttp", + "bouncycastle", + "apache-xml", +] + +// Native libraries that support the core Java libraries. +// +// Note: ART on-device chroot-based testing and benchmarking is not yet using +// the ART APEX, meaning that copies of some of these libraries have to be +// installed in `/system` for the ART Buildbot set-up to work properly. This is +// done by the `standalone-apex-files` Make phony target, used by the ART +// Buildbot and Golem (see `art/Android.mk`). If you add libraries to this list, +// you may have to also add them to `PRIVATE_ART_APEX_DEPENDENCY_LIBS` in +// `art/Android.mk`. +// TODO(b/121117762): Remove this note when both the ART Buildbot and Golem use +// the ART APEX. +libcore_native_shared_libs = [ + // External API (having APEX stubs). + "libandroidicu", + "libandroidio", + // TODO(b/124476339): Clean up the following libraries once "required" + // dependencies work with APEX libraries. 
+ "libexpat", + "libicui18n", + "libicuuc", + "libicu_jni", + "libjavacore", + "libopenjdk", +] +libcore_debug_native_shared_libs = [ + "libopenjdkd", +] + +libcore_native_device_only_shared_libs = [ + // TODO(b/122876336): Remove libpac.so once it's migrated to Webview. + // libpac is used by frameworks, not by ART host. + "libpac", +] + +// Temporary library includes for b/123591866 as all libraries are moved into the main art-apex. +art_runtime_libraries_zipapex = [ + "libnativebridge", + "libnativeloader", + "libnativehelper", + "libcutils", +] + +android_app_certificate { + name: "com.android.art.certificate", + certificate: "com.android.art", +} + +apex_key { + name: "com.android.art.key", + public_key: "com.android.art.avbpubkey", + private_key: "com.android.art.pem", +} + +prebuilt_etc { + name: "com.android.art.ld.config.txt", + src: "ld.config.txt", + filename: "ld.config.txt", + installable: false, +} + +// Default values shared by device ART APEXes. +apex_defaults { + name: "com.android.art-defaults", + compile_multilib: "both", + manifest: "manifest-art.json", + java_libs: libcore_java_libs, + native_shared_libs: art_runtime_base_native_shared_libs + + art_runtime_base_native_device_only_shared_libs + + libcore_native_device_only_shared_libs + + libcore_native_shared_libs, + multilib: { + both: { + binaries: art_runtime_base_binaries_both + + art_runtime_base_binaries_both_on_device_first_on_host, + }, + prefer32: { + binaries: art_runtime_base_binaries_prefer32_on_device_first_on_host, + }, + first: { + binaries: art_tools_common_binaries + + art_tools_device_only_binaries, + }, + }, + prebuilts: ["com.android.art.ld.config.txt"], + key: "com.android.art.key", + required: [ + "art_apex_boot_integrity", + "com.android.i18n", + ], +} + +// Default values shared by (device) Debug and Testing ART APEXes. 
+apex_defaults { + name: "com.android.art-dev-defaults", + defaults: ["com.android.art-defaults"], + native_shared_libs: art_runtime_base_native_device_only_debug_shared_libs + + art_runtime_run_test_libs + + art_runtime_debug_native_shared_libs + + libcore_debug_native_shared_libs, + multilib: { + both: { + binaries: art_tools_debug_binaries_both + + art_runtime_debug_binaries_both_on_device_first_on_host, + }, + prefer32: { + binaries: art_runtime_debug_binaries_prefer32_on_device_first_on_host, + }, + first: { + binaries: art_tools_debug_binaries + + art_tools_debug_device_only_binaries, + }, + }, +} + +// Release version of the ART APEX module (not containing debug +// variants nor tools), included in user builds. Also used for +// storage-constrained devices in userdebug and eng builds. +art_apex { + name: "com.android.art.release", + defaults: ["com.android.art-defaults"], + certificate: ":com.android.art.certificate", +} + +// "Debug" version of the ART APEX module (containing both release and +// debug variants, as well as additional tools), included in userdebug and +// eng build. +art_apex { + name: "com.android.art.debug", + defaults: ["com.android.art-dev-defaults"], + certificate: ":com.android.art.certificate", +} + +// ART gtests with dependencies on internal ART APEX libraries. 
+art_gtests = [ + "art_cmdline_tests", + "art_compiler_tests", + "art_dex2oat_tests", + "art_dexanalyze_tests", + "art_dexdiag_tests", + "art_dexdump_tests", + "art_dexlayout_tests", + "art_dexlist_tests", + "art_dexoptanalyzer_tests", + "art_imgdiag_tests", + "art_libartbase_tests", + "art_libartpalette_tests", + "art_libdexfile_tests", + "art_libdexfile_support_tests", + "art_libprofile_tests", + "art_oatdump_tests", + "art_profman_tests", + "art_runtime_compiler_tests", + "art_runtime_tests", + "art_sigchain_tests", +] + +// "Testing" version of the ART APEX module (containing both release +// and debug variants, additional tools, and ART gtests), for testing +// purposes only. +art_apex_test { + name: "com.android.art.testing", + defaults: ["com.android.art-dev-defaults"], + file_contexts: ":com.android.art.debug-file_contexts", + certificate: ":com.android.art.certificate", + tests: art_gtests, + binaries: ["signal_dumper"], // Need signal_dumper for run-tests. +} + +// TODO: Do this better. art_apex_test_host will disable host builds when +// HOST_PREFER_32_BIT is set. We cannot simply use com.android.art.debug +// because binaries have different multilib classes and 'multilib: {}' isn't +// supported by target: { ... }. +// See b/120617876 for more information. 
+art_apex_test_host { + name: "com.android.art.host", + compile_multilib: "both", + payload_type: "zip", + host_supported: true, + device_supported: false, + manifest: "manifest-art.json", + java_libs: libcore_java_libs, + ignore_system_library_special_case: true, + native_shared_libs: art_runtime_base_native_shared_libs + + art_runtime_debug_native_shared_libs + + libcore_native_shared_libs + + libcore_debug_native_shared_libs + + art_runtime_libraries_zipapex + + art_runtime_run_test_libs, + multilib: { + both: { + binaries: art_runtime_base_binaries_both + + art_tools_debug_binaries_both, + }, + first: { + binaries: art_runtime_base_binaries_prefer32_on_device_first_on_host + + art_runtime_base_binaries_both_on_device_first_on_host + + art_runtime_debug_binaries_prefer32_on_device_first_on_host + + art_runtime_debug_binaries_both_on_device_first_on_host + + art_tools_common_binaries + + art_tools_debug_binaries + // Host APEX is always debug. + art_tools_host_only_binaries, + }, + }, + key: "com.android.art.key", + target: { + darwin: { + enabled: false, + }, + linux_bionic: { + enabled: true, + multilib: { + both: { + native_shared_libs: bionic_native_shared_libs, + binaries: bionic_binaries_both, + }, + }, + }, + }, +} + +python_binary_host { + name: "art-apex-tester", + srcs: ["art_apex_test.py"], + main: "art_apex_test.py", + version: { + py2: { + enabled: true, + }, + py3: { + enabled: false, + }, + }, +} + +// Genrules so we can run the checker, and empty Java library so that it gets executed. + +art_check_apex_gen_stem = "$(location art-apex-tester)" + + " --debugfs $(location debugfs)" + + " --tmpdir $(genDir)" + +// The non-flattened APEXes are always checked, as they are always generated +// (even when APEX flattening is enabled). 
+genrule_defaults { + name: "art-check-apex-gen-defaults", + tools: [ + "art-apex-tester", + "debugfs", + ], +} + +cc_defaults { + name: "art-check-apex-gen-fakebin-defaults", + host_supported: true, + device_supported: false, + target: { + darwin: { + enabled: false, // No python3. + }, + }, +} + +genrule { + name: "art-check-release-apex-gen", + defaults: ["art-check-apex-gen-defaults"], + srcs: [":com.android.art.release"], + cmd: art_check_apex_gen_stem + + " --flavor release" + + " $(in)" + + " && touch $(out)", + out: ["art-check-release-apex-gen.dummy"], +} + +cc_prebuilt_binary { + name: "art-check-release-apex-gen-fakebin", + defaults: ["art-check-apex-gen-fakebin-defaults"], + srcs: [":art-check-release-apex-gen"], +} + +genrule { + name: "art-check-debug-apex-gen", + defaults: ["art-check-apex-gen-defaults"], + srcs: [":com.android.art.debug"], + cmd: art_check_apex_gen_stem + + " --flavor debug" + + " $(in)" + + " && touch $(out)", + out: ["art-check-debug-apex-gen.dummy"], +} + +cc_prebuilt_binary { + name: "art-check-debug-apex-gen-fakebin", + defaults: ["art-check-apex-gen-fakebin-defaults"], + srcs: [":art-check-debug-apex-gen"], +} + +genrule { + name: "art-check-testing-apex-gen", + defaults: ["art-check-apex-gen-defaults"], + srcs: [":com.android.art.testing"], + cmd: art_check_apex_gen_stem + + " --flavor testing" + + " $(in)" + + " && touch $(out)", + out: ["art-check-testing-apex-gen.dummy"], +} + +cc_prebuilt_binary { + name: "art-check-testing-apex-gen-fakebin", + defaults: ["art-check-apex-gen-fakebin-defaults"], + srcs: [":art-check-testing-apex-gen"], +} diff --git a/build/apex/art_apex_boot_integrity.rc b/build/apex/art_apex_boot_integrity.rc new file mode 100644 index 0000000..92f616b --- /dev/null +++ b/build/apex/art_apex_boot_integrity.rc @@ -0,0 +1,21 @@ +# Copyright (C) 2019 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with 
the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Check that boot classpath files in /data/dalvik-cache have fsverity +# protection + +on post-fs-data + # TODO: Use apex path once feature is implemented. + exec - root -- /system/bin/art_apex_boot_integrity diff --git a/build/apex/art_apex_boot_integrity.sh b/build/apex/art_apex_boot_integrity.sh new file mode 100644 index 0000000..36d0f7f --- /dev/null +++ b/build/apex/art_apex_boot_integrity.sh @@ -0,0 +1,55 @@ +#!/system/bin/sh + +# Copyright (C) 2019 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +alias log_info="log -t art_apex -p i" +alias log_error="log -t art_apex -p f" +

log_info "=== ART pre-boot integrity checks ===" + +# Measure (and enable) fsverity to see if things are installed. Enable is not +# idempotent, and we'd need to parse the error string to see whether it says +# data was installed. Rather do a two-step. +FILES=`find /data/dalvik-cache -type f -a -name 'system@framework@boot*' -o -name 'system@framework@*jar*'` + +if [ ! 
-f "/system/bin/fsverity" ] ; then + log_error "Device is not fsverity-enabled." + rm -f $FILES + exit 0 +fi + +for FILE in $FILES ; do + if [ ! -f "$FILE" ] ; then + continue # May have deleted already. + fi + + # Check for fsverity protection. + fsverity measure $FILE || \ + ENABLE_MSG=`fsverity enable $FILE 2>&1` || \ + { + # No installed data, can't enable - clean up. + # Note: to avoid side effects, only delete the tested files. To avoid + # understanding arches here, delete all, even if that may delete + # too aggressively. + log_error "Enable failed: $ENABLE_MSG" ; + rm -f $FILES ; + exit 1 ; + } + + # Check for integrity. + INTEGRITY_MSG=`dd if=$FILE of=/dev/null bs=4k 2>&1` || \ + { log_error "Integrity failed: $INTEGRITY_MSG" ; rm -f $FILES ; exit 2 ; } +done diff --git a/build/apex/art_apex_test.py b/build/apex/art_apex_test.py new file mode 100755 index 0000000..358ef82 --- /dev/null +++ b/build/apex/art_apex_test.py @@ -0,0 +1,1305 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright (C) 2019 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import fnmatch +import logging +import os +import os.path +import subprocess +import sys +import zipfile + +logging.basicConfig(format='%(message)s') + +# Flavors of ART APEX package. 
+FLAVOR_RELEASE = 'release' +FLAVOR_DEBUG = 'debug' +FLAVOR_TESTING = 'testing' +FLAVOR_AUTO = 'auto' +FLAVORS_ALL = [FLAVOR_RELEASE, FLAVOR_DEBUG, FLAVOR_TESTING, FLAVOR_AUTO] + +# Bitness options for APEX package +BITNESS_32 = '32' +BITNESS_64 = '64' +BITNESS_MULTILIB = 'multilib' +BITNESS_AUTO = 'auto' +BITNESS_ALL = [BITNESS_32, BITNESS_64, BITNESS_MULTILIB, BITNESS_AUTO] + +# Architectures supported by APEX packages. +ARCHS = ["arm", "arm64", "x86", "x86_64"] + +# Directory containing ART tests within an ART APEX (if the package includes +# any). ART test executables are installed in `bin/art/`. Segregating +# tests by architecture is useful on devices supporting more than one +# architecture, as it permits testing all of them using a single ART APEX +# package. +ART_TEST_DIR = 'bin/art' + + +# Test if a given variable is set to a string "true". +def isEnvTrue(var): + return var in os.environ and os.environ[var] == 'true' + + +class FSObject: + def __init__(self, name, is_dir, is_exec, is_symlink, size): + self.name = name + self.is_dir = is_dir + self.is_exec = is_exec + self.is_symlink = is_symlink + self.size = size + + def __str__(self): + return '%s(dir=%r,exec=%r,symlink=%r,size=%d)' \ + % (self.name, self.is_dir, self.is_exec, self.is_symlink, self.size) + + +class TargetApexProvider: + def __init__(self, apex, tmpdir, debugfs): + self._tmpdir = tmpdir + self._debugfs = debugfs + self._folder_cache = {} + self._payload = os.path.join(self._tmpdir, 'apex_payload.img') + # Extract payload to tmpdir. + apex_zip = zipfile.ZipFile(apex) + apex_zip.extract('apex_payload.img', tmpdir) + + def __del__(self): + # Delete temps. + if os.path.exists(self._payload): + os.remove(self._payload) + + def get(self, path): + apex_dir, name = os.path.split(path) + if not apex_dir: + apex_dir = '.' 
+ apex_map = self.read_dir(apex_dir) + return apex_map[name] if name in apex_map else None + + def read_dir(self, apex_dir): + if apex_dir in self._folder_cache: + return self._folder_cache[apex_dir] + # Cannot use check_output as it will annoy with stderr. + process = subprocess.Popen([self._debugfs, '-R', 'ls -l -p %s' % apex_dir, self._payload], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True) + stdout, _ = process.communicate() + res = str(stdout) + apex_map = {} + # Debugfs output looks like this: + # debugfs 1.44.4 (18-Aug-2018) + # /12/040755/0/2000/.// + # /2/040755/1000/1000/..// + # /13/100755/0/2000/dalvikvm32/28456/ + # /14/100755/0/2000/dexoptanalyzer/20396/ + # /15/100755/0/2000/linker/1152724/ + # /16/100755/0/2000/dex2oat/563508/ + # /17/100755/0/2000/linker64/1605424/ + # /18/100755/0/2000/profman/85304/ + # /19/100755/0/2000/dalvikvm64/28576/ + # | | | | | | + # | | | #- gid #- name #- size + # | | #- uid + # | #- type and permission bits + # #- inode nr (?) + # + # Note: could break just on '/' to avoid names with newlines. + for line in res.split("\n"): + if not line: + continue + comps = line.split('/') + if len(comps) != 8: + logging.warning('Could not break and parse line \'%s\'', line) + continue + bits = comps[2] + name = comps[5] + size_str = comps[6] + # Use a negative value as an indicator of undefined/unknown size. 
+ size = int(size_str) if size_str != '' else -1 + if len(bits) != 6: + logging.warning('Dont understand bits \'%s\'', bits) + continue + is_dir = bits[1] == '4' + + def is_exec_bit(ch): + return int(ch) & 1 == 1 + + is_exec = is_exec_bit(bits[3]) and is_exec_bit(bits[4]) and is_exec_bit(bits[5]) + is_symlink = bits[1] == '2' + apex_map[name] = FSObject(name, is_dir, is_exec, is_symlink, size) + self._folder_cache[apex_dir] = apex_map + return apex_map + + +class TargetFlattenedApexProvider: + def __init__(self, apex): + self._folder_cache = {} + self._apex = apex + + def get(self, path): + apex_dir, name = os.path.split(path) + if not apex_dir: + apex_dir = '.' + apex_map = self.read_dir(apex_dir) + return apex_map[name] if name in apex_map else None + + def read_dir(self, apex_dir): + if apex_dir in self._folder_cache: + return self._folder_cache[apex_dir] + apex_map = {} + dirname = os.path.join(self._apex, apex_dir) + if os.path.exists(dirname): + for basename in os.listdir(dirname): + filepath = os.path.join(dirname, basename) + is_dir = os.path.isdir(filepath) + is_exec = os.access(filepath, os.X_OK) + is_symlink = os.path.islink(filepath) + if is_symlink: + # Report the length of the symlink's target's path as file size, like `ls`. + size = len(os.readlink(filepath)) + else: + size = os.path.getsize(filepath) + apex_map[basename] = FSObject(basename, is_dir, is_exec, is_symlink, size) + self._folder_cache[apex_dir] = apex_map + return apex_map + + +class HostApexProvider: + def __init__(self, apex, tmpdir): + self._tmpdir = tmpdir + self._folder_cache = {} + self._payload = os.path.join(self._tmpdir, 'apex_payload.zip') + # Extract payload to tmpdir. + apex_zip = zipfile.ZipFile(apex) + apex_zip.extract('apex_payload.zip', tmpdir) + + def __del__(self): + # Delete temps. 
+ if os.path.exists(self._payload): + os.remove(self._payload) + + def get(self, path): + apex_dir, name = os.path.split(path) + if not apex_dir: + apex_dir = '' + apex_map = self.read_dir(apex_dir) + return apex_map[name] if name in apex_map else None + + def read_dir(self, apex_dir): + if apex_dir in self._folder_cache: + return self._folder_cache[apex_dir] + if not self._folder_cache: + self.parse_zip() + if apex_dir in self._folder_cache: + return self._folder_cache[apex_dir] + return {} + + def parse_zip(self): + apex_zip = zipfile.ZipFile(self._payload) + infos = apex_zip.infolist() + for zipinfo in infos: + path = zipinfo.filename + + # Assume no empty file is stored. + assert path + + def get_octal(val, index): + return (val >> (index * 3)) & 0x7 + + def bits_is_exec(val): + # TODO: Enforce group/other, too? + return get_octal(val, 2) & 1 == 1 + + is_zipinfo = True + while path: + apex_dir, base = os.path.split(path) + # TODO: If directories are stored, base will be empty. + + if apex_dir not in self._folder_cache: + self._folder_cache[apex_dir] = {} + dir_map = self._folder_cache[apex_dir] + if base not in dir_map: + if is_zipinfo: + bits = (zipinfo.external_attr >> 16) & 0xFFFF + is_dir = get_octal(bits, 4) == 4 + is_symlink = get_octal(bits, 4) == 2 + is_exec = bits_is_exec(bits) + size = zipinfo.file_size + else: + is_exec = False # Seems we can't get this easily? + is_symlink = False + is_dir = True + # Use a negative value as an indicator of undefined/unknown size. + size = -1 + dir_map[base] = FSObject(base, is_dir, is_exec, is_symlink, size) + is_zipinfo = False + path = apex_dir + + +# DO NOT USE DIRECTLY! This is an "abstract" base class. 
+class Checker: + def __init__(self, provider): + self._provider = provider + self._errors = 0 + self._expected_file_globs = set() + + def fail(self, msg, *fail_args): + self._errors += 1 + logging.error(msg, *fail_args) + + def error_count(self): + return self._errors + + def reset_errors(self): + self._errors = 0 + + def is_file(self, path): + fs_object = self._provider.get(path) + if fs_object is None: + return False, 'Could not find %s' + if fs_object.is_dir: + return False, '%s is a directory' + return True, '' + + def is_dir(self, path): + fs_object = self._provider.get(path) + if fs_object is None: + return False, 'Could not find %s' + if not fs_object.is_dir: + return False, '%s is not a directory' + return True, '' + + def check_file(self, path): + ok, msg = self.is_file(path) + if not ok: + self.fail(msg, path) + self._expected_file_globs.add(path) + return ok + + def check_executable(self, filename): + path = 'bin/%s' % filename + if not self.check_file(path): + return + if not self._provider.get(path).is_exec: + self.fail('%s is not executable', path) + + def check_executable_symlink(self, filename): + path = 'bin/%s' % filename + fs_object = self._provider.get(path) + if fs_object is None: + self.fail('Could not find %s', path) + return + if fs_object.is_dir: + self.fail('%s is a directory', path) + return + if not fs_object.is_symlink: + self.fail('%s is not a symlink', path) + self._expected_file_globs.add(path) + + def arch_dirs_for_path(self, path): + # Look for target-specific subdirectories for the given directory path. + # This is needed because the list of build targets is not propagated + # to this script. + # + # TODO(b/123602136): Pass build target information to this script and fix + # all places where this function in used (or similar workarounds). 
+ dirs = [] + for arch in ARCHS: + dir = '%s/%s' % (path, arch) + found, _ = self.is_dir(dir) + if found: + dirs.append(dir) + return dirs + + def check_art_test_executable(self, filename): + dirs = self.arch_dirs_for_path(ART_TEST_DIR) + if not dirs: + self.fail('ART test binary missing: %s', filename) + for dir in dirs: + test_path = '%s/%s' % (dir, filename) + self._expected_file_globs.add(test_path) + if not self._provider.get(test_path).is_exec: + self.fail('%s is not executable', test_path) + + def check_single_library(self, filename): + lib_path = 'lib/%s' % filename + lib64_path = 'lib64/%s' % filename + lib_is_file, _ = self.is_file(lib_path) + if lib_is_file: + self._expected_file_globs.add(lib_path) + lib64_is_file, _ = self.is_file(lib64_path) + if lib64_is_file: + self._expected_file_globs.add(lib64_path) + if not lib_is_file and not lib64_is_file: + self.fail('Library missing: %s', filename) + + def check_dexpreopt(self, basename): + dirs = self.arch_dirs_for_path('javalib') + for dir in dirs: + for ext in ['art', 'oat', 'vdex']: + self.check_file('%s/%s.%s' % (dir, basename, ext)) + + def check_java_library(self, basename): + return self.check_file('javalib/%s.jar' % basename) + + def ignore_path(self, path_glob): + self._expected_file_globs.add(path_glob) + + def check_optional_art_test_executable(self, filename): + for arch in ARCHS: + self.ignore_path('%s/%s/%s' % (ART_TEST_DIR, arch, filename)) + + def check_no_superfluous_files(self, dir_path): + paths = [] + for name in sorted(self._provider.read_dir(dir_path).keys()): + if name not in ('.', '..'): + paths.append(os.path.join(dir_path, name)) + expected_paths = set() + dir_prefix = dir_path + '/' + for path_glob in self._expected_file_globs: + expected_paths |= set(fnmatch.filter(paths, path_glob)) + # If there are globs in subdirectories of dir_path we want to match their + # path segments at this directory level. 
+ if path_glob.startswith(dir_prefix): + subpath = path_glob[len(dir_prefix):] + subpath_first_segment, _, _ = subpath.partition('/') + expected_paths |= set(fnmatch.filter(paths, dir_prefix + subpath_first_segment)) + for unexpected_path in set(paths) - expected_paths: + self.fail('Unexpected file \'%s\'', unexpected_path) + + # Just here for docs purposes, even if it isn't good Python style. + + def check_symlinked_multilib_executable(self, filename): + """Check bin/filename32, and/or bin/filename64, with symlink bin/filename.""" + raise NotImplementedError + + def check_symlinked_first_executable(self, filename): + """Check bin/filename32, and/or bin/filename64, with symlink bin/filename.""" + raise NotImplementedError + + def check_multilib_executable(self, filename): + """Check bin/filename for 32 bit, and/or bin/filename64.""" + raise NotImplementedError + + def check_first_executable(self, filename): + """Check bin/filename for 32 bit, and/or bin/filename64.""" + raise NotImplementedError + + def check_native_library(self, basename): + """Check lib/basename.so, and/or lib64/basename.so.""" + raise NotImplementedError + + def check_optional_native_library(self, basename_glob): + """Allow lib/basename.so and/or lib64/basename.so to exist.""" + raise NotImplementedError + + def check_prefer64_library(self, basename): + """Check lib64/basename.so, or lib/basename.so on 32 bit only.""" + raise NotImplementedError + + +class Arch32Checker(Checker): + def check_symlinked_multilib_executable(self, filename): + self.check_executable('%s32' % filename) + self.check_executable_symlink(filename) + + def check_symlinked_first_executable(self, filename): + self.check_executable('%s32' % filename) + self.check_executable_symlink(filename) + + def check_multilib_executable(self, filename): + self.check_executable('%s32' % filename) + + def check_first_executable(self, filename): + self.check_executable('%s32' % filename) + + def check_native_library(self, basename): + # 
TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve + # the precision of this test? + self.check_file('lib/%s.so' % basename) + + def check_optional_native_library(self, basename_glob): + self.ignore_path('lib/%s.so' % basename_glob) + + def check_prefer64_library(self, basename): + self.check_native_library(basename) + + +class Arch64Checker(Checker): + def check_symlinked_multilib_executable(self, filename): + self.check_executable('%s64' % filename) + self.check_executable_symlink(filename) + + def check_symlinked_first_executable(self, filename): + self.check_executable('%s64' % filename) + self.check_executable_symlink(filename) + + def check_multilib_executable(self, filename): + self.check_executable('%s64' % filename) + + def check_first_executable(self, filename): + self.check_executable('%s64' % filename) + + def check_native_library(self, basename): + # TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve + # the precision of this test? + self.check_file('lib64/%s.so' % basename) + + def check_optional_native_library(self, basename_glob): + self.ignore_path('lib64/%s.so' % basename_glob) + + def check_prefer64_library(self, basename): + self.check_native_library(basename) + + +class MultilibChecker(Checker): + def check_symlinked_multilib_executable(self, filename): + self.check_executable('%s32' % filename) + self.check_executable('%s64' % filename) + self.check_executable_symlink(filename) + + def check_symlinked_first_executable(self, filename): + self.check_executable('%s64' % filename) + self.check_executable_symlink(filename) + + def check_multilib_executable(self, filename): + self.check_executable('%s64' % filename) + self.check_executable('%s32' % filename) + + def check_first_executable(self, filename): + self.check_executable('%s64' % filename) + + def check_native_library(self, basename): + # TODO: Use $TARGET_ARCH (e.g. 
check whether it is "arm" or "arm64") to improve + # the precision of this test? + self.check_file('lib/%s.so' % basename) + self.check_file('lib64/%s.so' % basename) + + def check_optional_native_library(self, basename_glob): + self.ignore_path('lib/%s.so' % basename_glob) + self.ignore_path('lib64/%s.so' % basename_glob) + + def check_prefer64_library(self, basename): + self.check_file('lib64/%s.so' % basename) + + +class ReleaseChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'Release Checker' + + def run(self): + # Check the Protocol Buffers APEX manifest. + self._checker.check_file('apex_manifest.pb') + + # Check binaries for ART. + self._checker.check_first_executable('dex2oat') + self._checker.check_executable('dexdump') + self._checker.check_executable('dexlist') + self._checker.check_executable('dexoptanalyzer') + self._checker.check_executable('profman') + self._checker.check_symlinked_multilib_executable('dalvikvm') + + # Check exported libraries for ART. + self._checker.check_native_library('libdexfile_external') + self._checker.check_native_library('libnativebridge') + self._checker.check_native_library('libnativehelper') + self._checker.check_native_library('libnativeloader') + + # Check internal libraries for ART. 
+ self._checker.check_native_library('libadbconnection') + self._checker.check_native_library('libart') + self._checker.check_native_library('libart-compiler') + self._checker.check_native_library('libart-dexlayout') + self._checker.check_native_library('libart-disassembler') + self._checker.check_native_library('libartbase') + self._checker.check_native_library('libartpalette') + self._checker.check_native_library('libdexfile') + self._checker.check_native_library('libdexfile_support') + self._checker.check_native_library('libopenjdkjvm') + self._checker.check_native_library('libopenjdkjvmti') + self._checker.check_native_library('libprofile') + self._checker.check_native_library('libsigchain') + + # Check java libraries for Managed Core Library. + self._checker.check_java_library('apache-xml') + self._checker.check_java_library('bouncycastle') + self._checker.check_java_library('core-icu4j') + self._checker.check_java_library('core-libart') + self._checker.check_java_library('core-oj') + self._checker.check_java_library('okhttp') + if isEnvTrue('EMMA_INSTRUMENT_FRAMEWORK'): + # In coverage builds jacoco is added to the list of ART apex jars. + self._checker.check_java_library('jacocoagent') + + # Check internal native libraries for Managed Core Library. + self._checker.check_native_library('libjavacore') + self._checker.check_native_library('libopenjdk') + + # Check internal native library dependencies. + # + # Any internal dependency not listed here will cause a failure in + # NoSuperfluousLibrariesChecker. 
Internal dependencies are generally just + # implementation details, but in the release package we want to track them + # because a) they add to the package size and the RAM usage (in particular + # if the library is also present in /system or another APEX and hence might + # get loaded twice through linker namespace separation), and b) we need to + # catch invalid dependencies on /system or other APEXes that should go + # through an exported library with stubs (b/128708192 tracks implementing a + # better approach for that). + self._checker.check_native_library('libbacktrace') + self._checker.check_native_library('libbase') + self._checker.check_native_library('libc++') + self._checker.check_native_library('libdt_fd_forward') + self._checker.check_native_library('libdt_socket') + self._checker.check_native_library('libjdwp') + self._checker.check_native_library('liblzma') + self._checker.check_native_library('libnpt') + self._checker.check_native_library('libunwindstack') + self._checker.check_native_library('libziparchive') + self._checker.check_optional_native_library('libvixl') # Only on ARM/ARM64 + + # Allow extra dependencies that appear in ASAN builds. + self._checker.check_optional_native_library('libclang_rt.asan*') + self._checker.check_optional_native_library('libclang_rt.hwasan*') + self._checker.check_optional_native_library('libclang_rt.ubsan*') + + # Check dexpreopt files for libcore bootclasspath jars. + self._checker.check_dexpreopt('boot') + self._checker.check_dexpreopt('boot-apache-xml') + self._checker.check_dexpreopt('boot-bouncycastle') + self._checker.check_dexpreopt('boot-core-icu4j') + self._checker.check_dexpreopt('boot-core-libart') + self._checker.check_dexpreopt('boot-okhttp') + if isEnvTrue('EMMA_INSTRUMENT_FRAMEWORK'): + # In coverage builds the ART boot image includes jacoco. 
+ self._checker.check_dexpreopt('boot-jacocoagent') + +class ReleaseTargetChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'Release (Target) Checker' + + def run(self): + # We don't check for the presence of the JSON APEX manifest (file + # `apex_manifest.json`, only present in target APEXes), as it is only + # included for compatibility reasons with Android Q and will likely be + # removed in Android R. + + # Check binaries for ART. + self._checker.check_executable('oatdump') + self._checker.check_multilib_executable('dex2oat') + + # Check internal libraries for ART. + self._checker.check_prefer64_library('libart-disassembler') + self._checker.check_native_library('libperfetto_hprof') + + # Check exported native libraries for Managed Core Library. + self._checker.check_native_library('libandroidicu') + self._checker.check_native_library('libandroidio') + + # Check internal native library dependencies. + self._checker.check_native_library('libcrypto') + self._checker.check_native_library('libexpat') + self._checker.check_native_library('libicui18n') + self._checker.check_native_library('libicuuc') + self._checker.check_native_library('libicu_jni') + self._checker.check_native_library('libpac') + self._checker.check_native_library('libz') + + # TODO(b/139046641): Fix proper 2nd arch checks. For now, just ignore these + # directories. + self._checker.ignore_path('bin/arm') + self._checker.ignore_path('lib/arm') + self._checker.ignore_path('lib64/arm') + + +class ReleaseHostChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'Release (Host) Checker' + + def run(self): + # Check binaries for ART. + self._checker.check_executable('hprof-conv') + self._checker.check_symlinked_first_executable('dex2oatd') + self._checker.check_symlinked_first_executable('dex2oat') + + # Check exported native libraries for Managed Core Library. 
+ self._checker.check_native_library('libandroidicu-host') + self._checker.check_native_library('libandroidio') + + # Check internal libraries for Managed Core Library. + self._checker.check_native_library('libexpat-host') + self._checker.check_native_library('libicui18n-host') + self._checker.check_native_library('libicuuc-host') + self._checker.check_native_library('libicu_jni') + self._checker.check_native_library('libz-host') + + +class DebugChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'Debug Checker' + + def run(self): + # Check binaries for ART. + self._checker.check_executable('dexdiag') + self._checker.check_executable('dexanalyze') + self._checker.check_executable('dexlayout') + self._checker.check_symlinked_multilib_executable('imgdiag') + + # Check debug binaries for ART. + self._checker.check_executable('dexlayoutd') + self._checker.check_executable('dexoptanalyzerd') + self._checker.check_symlinked_multilib_executable('imgdiagd') + self._checker.check_executable('profmand') + + # Check internal libraries for ART. + self._checker.check_native_library('libadbconnectiond') + self._checker.check_native_library('libart-disassembler') + self._checker.check_native_library('libartbased') + self._checker.check_native_library('libartd') + self._checker.check_native_library('libartd-compiler') + self._checker.check_native_library('libartd-dexlayout') + self._checker.check_native_library('libartd-disassembler') + self._checker.check_native_library('libdexfiled') + self._checker.check_native_library('libopenjdkjvmd') + self._checker.check_native_library('libopenjdkjvmtid') + self._checker.check_native_library('libprofiled') + + # Check internal libraries for Managed Core Library. 
+ self._checker.check_native_library('libopenjdkd') + + +class DebugTargetChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'Debug (Target) Checker' + + def run(self): + # Check ART debug binaries. + self._checker.check_multilib_executable('dex2oatd') + self._checker.check_multilib_executable('dex2oat') + self._checker.check_executable('oatdumpd') + + # Check ART internal libraries. + self._checker.check_native_library('libdexfiled_external') + self._checker.check_native_library('libperfetto_hprofd') + + # Check internal native library dependencies. + # + # Like in the release package, we check that we don't get other dependencies + # besides those listed here. In this case the concern is not bloat, but + # rather that we don't get behavioural differences between user (release) + # and userdebug/eng builds, which could happen if the debug package has + # duplicate library instances where releases don't. In other words, it's + # uncontroversial to add debug-only dependencies, as long as they don't make + # assumptions on having a single global state (ideally they should have + # double_loadable:true, cf. go/double_loadable). Also, like in the release + # package we need to look out for dependencies that should go through + # exported library stubs (until b/128708192 is fixed). + self._checker.check_optional_native_library('libvixld') # Only on ARM/ARM64 + self._checker.check_prefer64_library('libmeminfo') + self._checker.check_prefer64_library('libprocinfo') + + +class TestingTargetChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'Testing (Target) Checker' + + def run(self): + # Check cmdline tests. + self._checker.check_optional_art_test_executable('cmdline_parser_test') + + # Check compiler tests. 
+ self._checker.check_art_test_executable('atomic_dex_ref_map_test') + self._checker.check_art_test_executable('bounds_check_elimination_test') + self._checker.check_art_test_executable('codegen_test') + self._checker.check_art_test_executable('compiled_method_storage_test') + self._checker.check_art_test_executable('data_type_test') + self._checker.check_art_test_executable('dedupe_set_test') + self._checker.check_art_test_executable('dominator_test') + self._checker.check_art_test_executable('dwarf_test') + self._checker.check_art_test_executable('exception_test') + self._checker.check_art_test_executable('find_loops_test') + self._checker.check_art_test_executable('graph_checker_test') + self._checker.check_art_test_executable('graph_test') + self._checker.check_art_test_executable('gvn_test') + self._checker.check_art_test_executable('induction_var_analysis_test') + self._checker.check_art_test_executable('induction_var_range_test') + self._checker.check_art_test_executable('jni_cfi_test') + self._checker.check_art_test_executable('jni_compiler_test') + self._checker.check_art_test_executable('licm_test') + self._checker.check_art_test_executable('linker_patch_test') + self._checker.check_art_test_executable('live_interval_test') + self._checker.check_art_test_executable('load_store_analysis_test') + self._checker.check_art_test_executable('load_store_elimination_test') + self._checker.check_art_test_executable('loop_optimization_test') + self._checker.check_art_test_executable('nodes_test') + self._checker.check_art_test_executable('nodes_vector_test') + self._checker.check_art_test_executable('optimizing_cfi_test') + self._checker.check_art_test_executable('output_stream_test') + self._checker.check_art_test_executable('parallel_move_test') + self._checker.check_art_test_executable('pretty_printer_test') + self._checker.check_art_test_executable('reference_type_propagation_test') + self._checker.check_art_test_executable('scheduler_test') + 
self._checker.check_art_test_executable('select_generator_test') + self._checker.check_art_test_executable('side_effects_test') + self._checker.check_art_test_executable('src_map_elem_test') + self._checker.check_art_test_executable('ssa_liveness_analysis_test') + self._checker.check_art_test_executable('ssa_test') + self._checker.check_art_test_executable('stack_map_test') + self._checker.check_art_test_executable('superblock_cloner_test') + self._checker.check_art_test_executable('suspend_check_test') + self._checker.check_art_test_executable('swap_space_test') + # These tests depend on a specific code generator and are conditionally included. + self._checker.check_optional_art_test_executable('constant_folding_test') + self._checker.check_optional_art_test_executable('dead_code_elimination_test') + self._checker.check_optional_art_test_executable('linearize_test') + self._checker.check_optional_art_test_executable('live_ranges_test') + self._checker.check_optional_art_test_executable('liveness_test') + self._checker.check_optional_art_test_executable('managed_register_arm64_test') + self._checker.check_optional_art_test_executable('managed_register_arm_test') + self._checker.check_optional_art_test_executable('managed_register_x86_64_test') + self._checker.check_optional_art_test_executable('managed_register_x86_test') + self._checker.check_optional_art_test_executable('register_allocator_test') + + # Check dex2oat tests. 
+ self._checker.check_art_test_executable('compiler_driver_test') + self._checker.check_art_test_executable('dex2oat_image_test') + self._checker.check_art_test_executable('dex2oat_test') + self._checker.check_art_test_executable('dex2oat_vdex_test') + self._checker.check_art_test_executable('dex_to_dex_decompiler_test') + self._checker.check_art_test_executable('elf_writer_test') + self._checker.check_art_test_executable('image_test') + self._checker.check_art_test_executable('image_write_read_test') + self._checker.check_art_test_executable('index_bss_mapping_encoder_test') + self._checker.check_art_test_executable('multi_oat_relative_patcher_test') + self._checker.check_art_test_executable('oat_writer_test') + self._checker.check_art_test_executable('verifier_deps_test') + # These tests depend on a specific code generator and are conditionally included. + self._checker.check_optional_art_test_executable('relative_patcher_arm64_test') + self._checker.check_optional_art_test_executable('relative_patcher_thumb2_test') + self._checker.check_optional_art_test_executable('relative_patcher_x86_64_test') + self._checker.check_optional_art_test_executable('relative_patcher_x86_test') + + # Check dexanalyze tests. + self._checker.check_optional_art_test_executable('dexanalyze_test') + + # Check dexdiag tests. + self._checker.check_optional_art_test_executable('dexdiag_test') + + # Check dexdump tests. + self._checker.check_art_test_executable('dexdump_test') + + # Check dexlayout tests. + self._checker.check_optional_art_test_executable('dexlayout_test') + + # Check dexlist tests. + self._checker.check_art_test_executable('dexlist_test') + + # Check dexoptanalyzer tests. + self._checker.check_art_test_executable('dexoptanalyzer_test') + + # Check imgdiag tests. + self._checker.check_art_test_executable('imgdiag_test') + + # Check libartbase tests. 
+ self._checker.check_art_test_executable('arena_allocator_test') + self._checker.check_art_test_executable('bit_field_test') + self._checker.check_art_test_executable('bit_memory_region_test') + self._checker.check_art_test_executable('bit_string_test') + self._checker.check_art_test_executable('bit_struct_test') + self._checker.check_art_test_executable('bit_table_test') + self._checker.check_art_test_executable('bit_utils_test') + self._checker.check_art_test_executable('bit_vector_test') + self._checker.check_art_test_executable('fd_file_test') + self._checker.check_art_test_executable('file_utils_test') + self._checker.check_art_test_executable('hash_set_test') + self._checker.check_art_test_executable('hex_dump_test') + self._checker.check_art_test_executable('histogram_test') + self._checker.check_art_test_executable('indenter_test') + self._checker.check_art_test_executable('instruction_set_test') + self._checker.check_art_test_executable('intrusive_forward_list_test') + self._checker.check_art_test_executable('leb128_test') + self._checker.check_art_test_executable('logging_test') + self._checker.check_art_test_executable('mem_map_test') + self._checker.check_art_test_executable('membarrier_test') + self._checker.check_art_test_executable('memfd_test') + self._checker.check_art_test_executable('memory_region_test') + self._checker.check_art_test_executable('safe_copy_test') + self._checker.check_art_test_executable('scoped_flock_test') + self._checker.check_art_test_executable('time_utils_test') + self._checker.check_art_test_executable('transform_array_ref_test') + self._checker.check_art_test_executable('transform_iterator_test') + self._checker.check_art_test_executable('utils_test') + self._checker.check_art_test_executable('variant_map_test') + self._checker.check_art_test_executable('zip_archive_test') + + # Check libartpalette tests. + self._checker.check_art_test_executable('palette_test') + + # Check libdexfile tests. 
+ self._checker.check_art_test_executable('art_dex_file_loader_test') + self._checker.check_art_test_executable('art_libdexfile_support_tests') + self._checker.check_art_test_executable('class_accessor_test') + self._checker.check_art_test_executable('code_item_accessors_test') + self._checker.check_art_test_executable('compact_dex_file_test') + self._checker.check_art_test_executable('compact_offset_table_test') + self._checker.check_art_test_executable('descriptors_names_test') + self._checker.check_art_test_executable('dex_file_loader_test') + self._checker.check_art_test_executable('dex_file_verifier_test') + self._checker.check_art_test_executable('dex_instruction_test') + self._checker.check_art_test_executable('primitive_test') + self._checker.check_art_test_executable('string_reference_test') + self._checker.check_art_test_executable('test_dex_file_builder_test') + self._checker.check_art_test_executable('type_lookup_table_test') + self._checker.check_art_test_executable('utf_test') + + # Check libprofile tests. + self._checker.check_optional_art_test_executable('profile_boot_info_test') + self._checker.check_optional_art_test_executable('profile_compilation_info_test') + + # Check oatdump tests. + self._checker.check_art_test_executable('oatdump_app_test') + self._checker.check_art_test_executable('oatdump_image_test') + self._checker.check_art_test_executable('oatdump_test') + + # Check profman tests. + self._checker.check_art_test_executable('profile_assistant_test') + + # Check runtime compiler tests. + self._checker.check_art_test_executable('module_exclusion_test') + self._checker.check_art_test_executable('reflection_test') + + # Check runtime tests. 
+ self._checker.check_art_test_executable('arch_test') + self._checker.check_art_test_executable('barrier_test') + self._checker.check_art_test_executable('card_table_test') + self._checker.check_art_test_executable('cha_test') + self._checker.check_art_test_executable('class_linker_test') + self._checker.check_art_test_executable('class_loader_context_test') + self._checker.check_art_test_executable('class_table_test') + self._checker.check_art_test_executable('compiler_filter_test') + self._checker.check_art_test_executable('dex_cache_test') + self._checker.check_art_test_executable('dlmalloc_space_random_test') + self._checker.check_art_test_executable('dlmalloc_space_static_test') + self._checker.check_art_test_executable('entrypoints_order_test') + self._checker.check_art_test_executable('exec_utils_test') + self._checker.check_art_test_executable('gtest_test') + self._checker.check_art_test_executable('handle_scope_test') + self._checker.check_art_test_executable('heap_test') + self._checker.check_art_test_executable('heap_verification_test') + self._checker.check_art_test_executable('hidden_api_test') + self._checker.check_art_test_executable('image_space_test') + self._checker.check_art_test_executable('immune_spaces_test') + self._checker.check_art_test_executable('imtable_test') + self._checker.check_art_test_executable('indirect_reference_table_test') + self._checker.check_art_test_executable('instruction_set_features_arm64_test') + self._checker.check_art_test_executable('instruction_set_features_arm_test') + self._checker.check_art_test_executable('instruction_set_features_test') + self._checker.check_art_test_executable('instruction_set_features_x86_64_test') + self._checker.check_art_test_executable('instruction_set_features_x86_test') + self._checker.check_art_test_executable('instrumentation_test') + self._checker.check_art_test_executable('intern_table_test') + self._checker.check_art_test_executable('java_vm_ext_test') + 
self._checker.check_art_test_executable('jit_memory_region_test') + self._checker.check_art_test_executable('jni_internal_test') + self._checker.check_art_test_executable('large_object_space_test') + self._checker.check_art_test_executable('math_entrypoints_test') + self._checker.check_art_test_executable('memcmp16_test') + self._checker.check_art_test_executable('method_handles_test') + self._checker.check_art_test_executable('method_type_test') + self._checker.check_art_test_executable('method_verifier_test') + self._checker.check_art_test_executable('mod_union_table_test') + self._checker.check_art_test_executable('monitor_pool_test') + self._checker.check_art_test_executable('monitor_test') + self._checker.check_art_test_executable('mutex_test') + self._checker.check_art_test_executable('oat_file_assistant_test') + self._checker.check_art_test_executable('oat_file_test') + self._checker.check_art_test_executable('object_test') + self._checker.check_art_test_executable('parsed_options_test') + self._checker.check_art_test_executable('prebuilt_tools_test') + self._checker.check_art_test_executable('profiling_info_test') + self._checker.check_art_test_executable('profile_saver_test') + self._checker.check_art_test_executable('proxy_test') + self._checker.check_art_test_executable('quick_trampoline_entrypoints_test') + self._checker.check_art_test_executable('reference_queue_test') + self._checker.check_art_test_executable('reference_table_test') + self._checker.check_art_test_executable('reg_type_test') + self._checker.check_art_test_executable('rosalloc_space_random_test') + self._checker.check_art_test_executable('rosalloc_space_static_test') + self._checker.check_art_test_executable('runtime_callbacks_test') + self._checker.check_art_test_executable('runtime_test') + self._checker.check_art_test_executable('safe_math_test') + self._checker.check_art_test_executable('space_bitmap_test') + self._checker.check_art_test_executable('space_create_test') + 
self._checker.check_art_test_executable('stub_test') + self._checker.check_art_test_executable('subtype_check_info_test') + self._checker.check_art_test_executable('subtype_check_test') + self._checker.check_art_test_executable('system_weak_test') + self._checker.check_art_test_executable('task_processor_test') + self._checker.check_art_test_executable('thread_pool_test') + self._checker.check_art_test_executable('timing_logger_test') + self._checker.check_art_test_executable('transaction_test') + self._checker.check_art_test_executable('two_runtimes_test') + self._checker.check_art_test_executable('unstarted_runtime_test') + self._checker.check_art_test_executable('var_handle_test') + self._checker.check_art_test_executable('vdex_file_test') + + # Check sigchainlib tests. + self._checker.check_art_test_executable('sigchain_test') + + # Check ART test (internal) libraries. + self._checker.check_native_library('libart-gtest') + self._checker.check_native_library('libartd-simulator-container') + + # Check ART test tools. 
+ self._checker.check_executable('signal_dumper') + + +class NoSuperfluousBinariesChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'No superfluous binaries checker' + + def run(self): + self._checker.check_no_superfluous_files('bin') + + +class NoSuperfluousLibrariesChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'No superfluous libraries checker' + + def run(self): + self._checker.check_no_superfluous_files('javalib') + self._checker.check_no_superfluous_files('lib') + self._checker.check_no_superfluous_files('lib64') + + +class NoSuperfluousArtTestsChecker: + def __init__(self, checker): + self._checker = checker + + def __str__(self): + return 'No superfluous ART tests checker' + + def run(self): + for arch in ARCHS: + self._checker.check_no_superfluous_files('%s/%s' % (ART_TEST_DIR, arch)) + + +class List: + def __init__(self, provider, print_size=False): + self._provider = provider + self._print_size = print_size + + def print_list(self): + + def print_list_rec(path): + apex_map = self._provider.read_dir(path) + if apex_map is None: + return + apex_map = dict(apex_map) + if '.' in apex_map: + del apex_map['.'] + if '..' 
in apex_map: + del apex_map['..'] + for (_, val) in sorted(apex_map.items()): + val_path = os.path.join(path, val.name) + if self._print_size: + if val.size < 0: + print('[ n/a ] %s' % val_path) + else: + print('[%11d] %s' % (val.size, val_path)) + else: + print(val_path) + if val.is_dir: + print_list_rec(val_path) + + print_list_rec('') + + +class Tree: + def __init__(self, provider, title, print_size=False): + print('%s' % title) + self._provider = provider + self._has_next_list = [] + self._print_size = print_size + + @staticmethod + def get_vertical(has_next_list): + string = '' + for v in has_next_list: + string += '%s ' % ('│' if v else ' ') + return string + + @staticmethod + def get_last_vertical(last): + return '└── ' if last else '├── ' + + def print_tree(self): + + def print_tree_rec(path): + apex_map = self._provider.read_dir(path) + if apex_map is None: + return + apex_map = dict(apex_map) + if '.' in apex_map: + del apex_map['.'] + if '..' in apex_map: + del apex_map['..'] + key_list = list(sorted(apex_map.keys())) + for i, key in enumerate(key_list): + prev = self.get_vertical(self._has_next_list) + last = self.get_last_vertical(i == len(key_list) - 1) + val = apex_map[key] + if self._print_size: + if val.size < 0: + print('%s%s[ n/a ] %s' % (prev, last, val.name)) + else: + print('%s%s[%11d] %s' % (prev, last, val.size, val.name)) + else: + print('%s%s%s' % (prev, last, val.name)) + if val.is_dir: + self._has_next_list.append(i < len(key_list) - 1) + val_path = os.path.join(path, val.name) + print_tree_rec(val_path) + self._has_next_list.pop() + + print_tree_rec('') + + +# Note: do not sys.exit early, for __del__ cleanup. 
+def art_apex_test_main(test_args): + if test_args.host and test_args.flattened: + logging.error("Both of --host and --flattened set") + return 1 + if test_args.list and test_args.tree: + logging.error("Both of --list and --tree set") + return 1 + if test_args.size and not (test_args.list or test_args.tree): + logging.error("--size set but neither --list nor --tree set") + return 1 + if not test_args.flattened and not test_args.tmpdir: + logging.error("Need a tmpdir.") + return 1 + if not test_args.flattened and not test_args.host and not test_args.debugfs: + logging.error("Need debugfs.") + return 1 + + if test_args.host: + # Host APEX. + if test_args.flavor not in [FLAVOR_DEBUG, FLAVOR_AUTO]: + logging.error("Using option --host with non-Debug APEX") + return 1 + # Host APEX is always a debug flavor (for now). + test_args.flavor = FLAVOR_DEBUG + else: + # Device APEX. + if test_args.flavor == FLAVOR_AUTO: + logging.warning('--flavor=auto, trying to autodetect. This may be incorrect!') + for flavor in [ FLAVOR_RELEASE, FLAVOR_DEBUG, FLAVOR_TESTING ]: + flavor_pattern = '*.%s*' % flavor + if fnmatch.fnmatch(test_args.apex, flavor_pattern): + test_args.flavor = flavor + break + if test_args.flavor == FLAVOR_AUTO: + logging.error(' Could not detect APEX flavor, neither \'%s\', \'%s\' nor \'%s\' in \'%s\'', + FLAVOR_RELEASE, FLAVOR_DEBUG, FLAVOR_TESTING, test_args.apex) + return 1 + + try: + if test_args.host: + apex_provider = HostApexProvider(test_args.apex, test_args.tmpdir) + else: + if test_args.flattened: + apex_provider = TargetFlattenedApexProvider(test_args.apex) + else: + apex_provider = TargetApexProvider(test_args.apex, test_args.tmpdir, test_args.debugfs) + except (zipfile.BadZipFile, zipfile.LargeZipFile) as e: + logging.error('Failed to create provider: %s', e) + return 1 + + if test_args.tree: + Tree(apex_provider, test_args.apex, test_args.size).print_tree() + return 0 + if test_args.list: + List(apex_provider, test_args.size).print_list() + return 0 
+ + checkers = [] + if test_args.bitness == BITNESS_AUTO: + logging.warning('--bitness=auto, trying to autodetect. This may be incorrect!') + has_32 = apex_provider.get('lib') is not None + has_64 = apex_provider.get('lib64') is not None + if has_32 and has_64: + logging.warning(' Detected multilib') + test_args.bitness = BITNESS_MULTILIB + elif has_32: + logging.warning(' Detected 32-only') + test_args.bitness = BITNESS_32 + elif has_64: + logging.warning(' Detected 64-only') + test_args.bitness = BITNESS_64 + else: + logging.error(' Could not detect bitness, neither lib nor lib64 contained.') + List(apex_provider).print_list() + return 1 + + if test_args.bitness == BITNESS_32: + base_checker = Arch32Checker(apex_provider) + elif test_args.bitness == BITNESS_64: + base_checker = Arch64Checker(apex_provider) + else: + assert test_args.bitness == BITNESS_MULTILIB + base_checker = MultilibChecker(apex_provider) + + checkers.append(ReleaseChecker(base_checker)) + if test_args.host: + checkers.append(ReleaseHostChecker(base_checker)) + else: + checkers.append(ReleaseTargetChecker(base_checker)) + if test_args.flavor == FLAVOR_DEBUG or test_args.flavor == FLAVOR_TESTING: + checkers.append(DebugChecker(base_checker)) + if not test_args.host: + checkers.append(DebugTargetChecker(base_checker)) + if test_args.flavor == FLAVOR_TESTING: + checkers.append(TestingTargetChecker(base_checker)) + + # These checkers must be last. + checkers.append(NoSuperfluousBinariesChecker(base_checker)) + checkers.append(NoSuperfluousArtTestsChecker(base_checker)) + if not test_args.host: + # We only care about superfluous libraries on target, where their absence + # can be vital to ensure they get picked up from the right package. 
+ checkers.append(NoSuperfluousLibrariesChecker(base_checker)) + + failed = False + for checker in checkers: + logging.info('%s...', checker) + checker.run() + if base_checker.error_count() > 0: + logging.error('%s FAILED', checker) + failed = True + else: + logging.info('%s SUCCEEDED', checker) + base_checker.reset_errors() + + return 1 if failed else 0 + + +def art_apex_test_default(test_parser): + if 'ANDROID_PRODUCT_OUT' not in os.environ: + logging.error('No-argument use requires ANDROID_PRODUCT_OUT') + sys.exit(1) + product_out = os.environ['ANDROID_PRODUCT_OUT'] + if 'ANDROID_HOST_OUT' not in os.environ: + logging.error('No-argument use requires ANDROID_HOST_OUT') + sys.exit(1) + host_out = os.environ['ANDROID_HOST_OUT'] + + test_args = test_parser.parse_args(['dummy']) # For consistency. + test_args.debugfs = '%s/bin/debugfs' % host_out + test_args.tmpdir = '.' + test_args.tree = False + test_args.list = False + test_args.bitness = BITNESS_AUTO + failed = False + + if not os.path.exists(test_args.debugfs): + logging.error("Cannot find debugfs (default path %s). Please build it, e.g., m debugfs", + test_args.debugfs) + sys.exit(1) + + # TODO: Add host support. + # TODO: Add support for flattened APEX packages. + configs = [ + {'name': 'com.android.art.release', 'flavor': FLAVOR_RELEASE, 'host': False}, + {'name': 'com.android.art.debug', 'flavor': FLAVOR_DEBUG, 'host': False}, + {'name': 'com.android.art.testing', 'flavor': FLAVOR_TESTING, 'host': False}, + ] + + for config in configs: + logging.info(config['name']) + # TODO: Host will need different path. + test_args.apex = '%s/system/apex/%s.apex' % (product_out, config['name']) + if not os.path.exists(test_args.apex): + failed = True + logging.error("Cannot find APEX %s. 
Please build it first.", test_args.apex) + continue + test_args.flavor = config['flavor'] + test_args.host = config['host'] + failed = art_apex_test_main(test_args) != 0 + + if failed: + sys.exit(1) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Check integrity of an ART APEX.') + + parser.add_argument('apex', help='APEX file input') + + parser.add_argument('--host', help='Check as host APEX', action='store_true') + + parser.add_argument('--flattened', help='Check as flattened (target) APEX', action='store_true') + + parser.add_argument('--flavor', help='Check as FLAVOR APEX', choices=FLAVORS_ALL, + default=FLAVOR_AUTO) + + parser.add_argument('--list', help='List all files', action='store_true') + parser.add_argument('--tree', help='Print directory tree', action='store_true') + parser.add_argument('--size', help='Print file sizes', action='store_true') + + parser.add_argument('--tmpdir', help='Directory for temp files') + parser.add_argument('--debugfs', help='Path to debugfs') + + parser.add_argument('--bitness', help='Bitness to check', choices=BITNESS_ALL, + default=BITNESS_AUTO) + + if len(sys.argv) == 1: + art_apex_test_default(parser) + else: + args = parser.parse_args() + + if args is None: + sys.exit(1) + + exit_code = art_apex_test_main(args) + sys.exit(exit_code) diff --git a/build/apex/com.android.art.avbpubkey b/build/apex/com.android.art.avbpubkey new file mode 100644 index 0000000000000000000000000000000000000000..0d9cb497584a056fa7e2f8fb8798faf201ec2369 GIT binary patch literal 1032 zcmV+j1o!&@01yCg#wF>k8sm`Y&JZ~`{gEA`toxQxZt^oyk$cWsUwMrbI?mW1Gfgs8 zs;whTLUtd*-Euf@yjz5NxUJS#9w&ZwlyjD?LA?vNeDz};UJ>$_as!kOpMTrKMFh=;sj# z!O1i1<3sT7lG(;d#9(D?H^x{gN9}hKKAOs~1WuwmpbQV$;PRDP9C3$C|6CCkwi4_J zI+9N@y7VjaO?h8?Yg8_5Gq*(lF)FEgHOOJ7P}fzYXI{jfAMyDIG$7^(sn1a-3K<^^9H7HWd7Mc@w?07X!0TQr&z+O?LeOdIk(9c(#`uHN4$qwC6zIj7k#UI=$AyZ^F^sYlQBO6)Z1hTW)w1 zS7GbH<6re^<&ZvKH)$-3@Ogr%;4%{?Hry30_};UPxI4{HO2Y6!;?tK4ZicQF8!$iB 
z3_kjYPyA3H-}(R2D8-S&WwU|4h9?%rv+;D42)Y60N=o2n653i``EyIE<)Im9RMsIc zmgZKaf+COX{(#>+cWC3>Tn2uI+bGQPBL2jbZ(c1$?aQTvyftuxz3NZCliU#bY+U8V zeBSvoeSAf-qY{FV(#uPm%*9-iwp~83$$9rY3&?W~M?3P!S&LNQUwE--=#Dgprj0)heo0Ku*}S;Q#N zg01Jyx?po$u*0DUOl_<#zs$duKSr0_Atw-lsBzWek03vHAlV8E-G`BBJcSTyn&BOS zETTzqVjfAM%3FMWFrG$XhbrSA;@pzXhE7q9KNT9`&mO!$ut?1j$nB`h8?c#Eo0+Wl zrhr$HDI6BjG_CVqMDY5L^o?m(`;;83bFsb(O#BLrQ^p#~a{IkZODW&C%=>w3UT`zx z(QJa?st`aZAL2(HVLLj2f@l&l2+2HTBDde$fF&i~YKoTSWkV|unJChCSiP~p9Z+$K zhU|o?*0b2!VAKyq{8E+mURcvkhmd>96Q*7B`ZJgHk5=r>53633?Hjw0SIe)ZmkN%LNRt4-qH^8R;^z8(}>`~(*gql009Dm0s#Pv_@Bz? z1LGwA50DIt)$@Pf@yYOtHY}FS_7M>*?0=He5R>DQ$$5A0q0}Fb(oba_ zd#-|uFg|uIQKTW^F$c)yD*w!CLSBw{Z2Zrl{mHHOT%0SC`5UBLpkukxYF7vH8NTVl zM-BG$=j8jM!0P^&=5s12#?Qv?^>1l_9e?!0R4MBNZOOjRr`zKJ*5&W_Tbo+z#K9%5 zR8SkxWYgtQmVd+uoqZ#$f2osXxLJXTew_UDcjNKLMWgo8h*EB6m-Hdu8+}NrRkyz^ zNJ)ff3tkWt>o#JyZBg-ND*Q|{s^al?O51l57;Bsbf_-PTz^P9`VmF0;n7I&G_D74j z%1)PXw$WPug7e!b3+VFfpRCYsv_3Mo59NPPo!djq@@`Rh(hC4i*6FKLgT*O@M)j&( zE|ym#N3wiGksswdT1);c&J;C5`%Vr&^6XerxW>Fk&Tvb5Y~Z|TjO?K;;%f`u`nO}u zp=&mrl{h=mgR!l58 ziZT9Kv(=a!6NeR8({%z3WtV02i)!(9B?$a|fY=tINJ7`LFh?b9< z73@GeFj^Li!WeN#-}+3$ zAXD+CP5g92El|m!MP}($?zp!psmC6Jc)yLPY5AM_{Z&aw1(+ogt2=FIB68#gS2`4( z!z;Ggui4M1G&gH6m(gkAMfn33Qsk~B-jFL!D)joINK$AoJEUb|pXlxi*)%VeH0|YO zTlmI~;xswgw|9I_!l&|PV%sW#c|f$p`G#VO_9OzV8NXvbPjY4t0)hbn0Kug*t zkOKK3cd4im1Z`OQ^8_M?+kEH4_He@Wi4;J0>w8xhs7hkbCHSNL=9K8onl~TPn7H4- zP;%)gv#msC3y-mbs;p1uch|vFk>QI2?i1%NN*#&2O9{=;Y@wNlIt~l#^gw4_-4Ftqz!tVpkR@Y~B&3 z0J8w5-R5076V&}kLdX*7AAec6Z%rcV;mPG%0sx;-c@^;eI2y>2?nc!{^E_^=KW!32 zW&I!CyjQi#Ztd*5plV;Fj`o~1yC7+;>rK(PyGVq8-^`4>)v z0qOCeeStf#fMU12#MI2e9!Dg#`0~Oh_tE`v`sVltRJD8qDST{l%bjl-h9{Ag_0m@; z#ZQ2!Ym;^+R)W?DCh|)BJm0uA>zxqk)>>(cu~woe3jqA?jx%}k5?PmelZkro+0i0M zH^|Cw??BkNvQR8IfKyKI#&*#PH*ea}EuKd)k)SX0DEUGe%Y@|5vnqF-*)YVLbAwwHo^&%QSgW)B;W=&;%rpSSeHeYafrr(AW|>;rBYFcK z9;*0i2kLsOqB_k<6Rq^OI8=WCb^F(Lsyjy4(@D&(&u0cRk9gUz1YG3BD_O54@^R)!7W`l80z)1^DGjj)u*XX&v~0f&ra~1&LUx 
z7E7Ahi8;Z2;dnj~yYRcyNxhG?DS~dw;|$D*DtsCmp|4fSY5Bt*Hc6)M3*}jABJ}a~ zLOdtk8Bc%0f0gO1z^@v>b->gk)3f(o1+Rpxd&=1Ir u;}fz4XMrt?k5Q*CDgCg9gj1(jh553G<1Qsuy)nWlwj6fnU&+pmaOrG-/lib directory. +namespace.art.permitted.paths += /system/${LIB} +namespace.art.asan.permitted.paths += /system/${LIB} +namespace.art.links = system,neuralnetworks,adbd +# Need allow_all_shared_libs because libart.so can dlopen oat files in +# /system/framework and /data. +# TODO(b/130340935): Use a dynamically created linker namespace similar to +# classloader-namespace for oat files, and tighten this up. +namespace.art.link.system.allow_all_shared_libs = true +namespace.art.link.neuralnetworks.shared_libs = libneuralnetworks.so +namespace.art.link.adbd.shared_libs = libadbconnection_client.so + +############################################################################### +# "system" namespace +# +# Corresponds to the default namespace in /system/etc/ld.config.txt. Please keep +# in sync with linker config files in system/core/rootdir/etc. +############################################################################### +namespace.system.isolated = true +# Visible to allow links to be created at runtime, e.g. through +# android_link_namespaces in libnativeloader. +namespace.system.visible = true + +namespace.system.search.paths = /system/${LIB} +namespace.system.asan.search.paths = /data/asan/system/${LIB} + +namespace.system.links = art,adbd +namespace.system.link.art.shared_libs = libandroidicu.so +namespace.system.link.art.shared_libs += libdexfile_external.so +namespace.system.link.art.shared_libs += libdexfiled_external.so +# TODO(b/120786417 or b/134659294): libicuuc.so and libicui18n.so are kept for app compat. 
+namespace.system.link.art.shared_libs += libicui18n.so +namespace.system.link.art.shared_libs += libicuuc.so +namespace.system.link.art.shared_libs += libnativebridge.so +namespace.system.link.art.shared_libs += libnativehelper.so +namespace.system.link.art.shared_libs += libnativeloader.so + +# TODO(b/122876336): Remove libpac.so once it's migrated to Webview +namespace.system.link.art.shared_libs += libpac.so + +namespace.system.link.adbd.shared_libs = libadb_pairing_auth.so +namespace.system.link.adbd.shared_libs += libadb_pairing_connection.so +namespace.system.link.adbd.shared_libs += libadb_pairing_server.so + +# /system/lib/libc.so, etc are symlinks to +# /apex/com.android.runtime/lib/bionic/libc.so, etc. Add the path to the +# permitted paths because linker uses realpath(3) to check the accessibility +# of the lib. We could add this to search.paths instead but that makes the +# resolution of bionic libs be dependent on the order of /system/lib and +# /apex/.../lib/bionic in search.paths. If the latter is after the former, +# then the latter is never tried because libc.so is always found in +# /system/lib but fails to pass the accessibility test because of its realpath. +# It's better to not depend on the ordering if possible. +namespace.system.permitted.paths = /apex/com.android.runtime/${LIB}/bionic +namespace.system.asan.permitted.paths = /apex/com.android.runtime/${LIB}/bionic + +# Note that we don't need to link the art namespace with conscrypt: +# the runtime Java code and binaries do not explicitly load native libraries +# from it. + +############################################################################### +# "conscrypt" APEX namespace +# +# This namespace is for libraries within the conscrypt APEX. +############################################################################### + +# Keep in sync with the "conscrypt" namespace in system/core/rootdir/etc/ld.config*.txt. 
+namespace.conscrypt.isolated = true +namespace.conscrypt.visible = true + +namespace.conscrypt.search.paths = /apex/com.android.conscrypt/${LIB} +namespace.conscrypt.asan.search.paths = /apex/com.android.conscrypt/${LIB} +# TODO(b/144533348): to allow symlinks pointing the libs under /system/lib +# Note that this however does not open all libs in the system partition to +# the APEX namespaces, because searching of the libs are NOT done in +# /system/lib, but in /apex//lib directory. +namespace.conscrypt.permitted.paths = /system/${LIB} +namespace.conscrypt.asan.permitted.paths = /system/${LIB} +namespace.conscrypt.links = art,system +namespace.conscrypt.link.art.shared_libs = libandroidio.so +namespace.conscrypt.link.system.shared_libs = libc.so +namespace.conscrypt.link.system.shared_libs += libm.so +namespace.conscrypt.link.system.shared_libs += libdl.so +namespace.conscrypt.link.system.shared_libs += liblog.so + +############################################################################### +# "neuralnetworks" APEX namespace +# +# This namespace is for libraries within the NNAPI APEX. +############################################################################### +namespace.neuralnetworks.isolated = true +namespace.neuralnetworks.visible = true + +namespace.neuralnetworks.search.paths = /apex/com.android.neuralnetworks/${LIB} +namespace.neuralnetworks.asan.search.paths = /apex/com.android.neuralnetworks/${LIB} +# TODO(b/144533348): to allow symlinks pointing the libs under /system/lib +# Note that this however does not open all libs in the system partition to +# the APEX namespaces, because searching of the libs are NOT done in +# /system/lib, but in /apex//lib directory. 
+namespace.neuralnetworks.permitted.paths = /system/${LIB} +namespace.neuralnetworks.asan.permitted.paths = /system/${LIB} +namespace.neuralnetworks.links = system +namespace.neuralnetworks.link.system.shared_libs = libc.so +namespace.neuralnetworks.link.system.shared_libs += libcgrouprc.so +namespace.neuralnetworks.link.system.shared_libs += libdl.so +namespace.neuralnetworks.link.system.shared_libs += liblog.so +namespace.neuralnetworks.link.system.shared_libs += libm.so +namespace.neuralnetworks.link.system.shared_libs += libnativewindow.so +namespace.neuralnetworks.link.system.shared_libs += libneuralnetworks_packageinfo.so +namespace.neuralnetworks.link.system.shared_libs += libsync.so +namespace.neuralnetworks.link.system.shared_libs += libvndksupport.so + +############################################################################### +# "adbd" APEX namespace +# +# This namespace is for libraries within the adbd APEX. +############################################################################### + +namespace.adbd.isolated = true +namespace.adbd.visible = true + +namespace.adbd.search.paths = /apex/com.android.adbd/${LIB} +namespace.adbd.asan.search.paths = /apex/com.android.adbd/${LIB} +namespace.adbd.links = system +namespace.adbd.link.system.shared_libs = libc.so +namespace.adbd.link.system.shared_libs += libm.so +namespace.adbd.link.system.shared_libs += libdl.so +namespace.adbd.link.system.shared_libs += liblog.so diff --git a/build/apex/manifest-art.json b/build/apex/manifest-art.json new file mode 100644 index 0000000..59cbfac --- /dev/null +++ b/build/apex/manifest-art.json @@ -0,0 +1,4 @@ +{ + "name": "com.android.art", + "version": 1 +} diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh new file mode 100755 index 0000000..72bf74b --- /dev/null +++ b/build/apex/runtests.sh @@ -0,0 +1,202 @@ +#!/bin/bash + +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Run ART APEX tests. + +SCRIPT_DIR=$(dirname $0) + +# Status of whole test script. +exit_status=0 +# Status of current test suite. +test_status=0 + +function say { + echo "$0: $*" +} + +function die { + echo "$0: $*" + exit 1 +} + +function setup_die { + die "You need to source and lunch before you can use this script." +} + +[[ -n "$ANDROID_BUILD_TOP" ]] || setup_die +[[ -n "$ANDROID_PRODUCT_OUT" ]] || setup_die +[[ -n "$ANDROID_HOST_OUT" ]] || setup_die + +flattened_apex_p=$($ANDROID_BUILD_TOP/build/soong/soong_ui.bash --dumpvar-mode TARGET_FLATTEN_APEX)\ + || setup_die + +have_debugfs_p=false +if $flattened_apex_p; then :; else + if [ ! -e "$ANDROID_HOST_OUT/bin/debugfs" ] ; then + say "Could not find debugfs, building now." + build/soong/soong_ui.bash --make-mode debugfs-host || die "Cannot build debugfs" + fi + have_debugfs_p=true +fi + +# Fail early. 
+set -e + +build_apex_p=true +list_image_files_p=false +print_image_tree_p=false +print_file_sizes_p=false + +function usage { + cat < 0 { + deviceFrameSizeLimit = 7400 + } + cflags = append(cflags, + fmt.Sprintf("-Wframe-larger-than=%d", deviceFrameSizeLimit), + fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", deviceFrameSizeLimit), + ) + + cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.Config().LibartImgDeviceBaseAddress()) + if ctx.Config().IsEnvTrue("ART_TARGET_LINUX") { + cflags = append(cflags, "-DART_TARGET_LINUX") + } else { + cflags = append(cflags, "-DART_TARGET_ANDROID") + } + minDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA", "-0x1000000") + maxDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA", "0x1000000") + cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta) + cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta) + + return cflags +} + +func hostFlags(ctx android.LoadHookContext) []string { + var cflags []string + hostFrameSizeLimit := 1736 + if len(ctx.Config().SanitizeHost()) > 0 { + // art/test/137-cfi/cfi.cc + // error: stack frame size of 1944 bytes in function 'Java_Main_unwindInProcess' + hostFrameSizeLimit = 6400 + } + cflags = append(cflags, + fmt.Sprintf("-Wframe-larger-than=%d", hostFrameSizeLimit), + fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", hostFrameSizeLimit), + ) + + cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.Config().LibartImgHostBaseAddress()) + minDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA", "-0x1000000") + maxDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA", "0x1000000") + cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta) + cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta) + + if len(ctx.Config().SanitizeHost()) > 0 && !ctx.Config().IsEnvFalse("ART_ENABLE_ADDRESS_SANITIZER") { + // We enable full sanitization on the host by default. 
+ cflags = append(cflags, "-DART_ENABLE_ADDRESS_SANITIZER=1") + } + + return cflags +} + +func globalDefaults(ctx android.LoadHookContext) { + type props struct { + Target struct { + Android struct { + Cflags []string + } + Host struct { + Cflags []string + } + } + Cflags []string + Asflags []string + Sanitize struct { + Recover []string + } + } + + p := &props{} + p.Cflags, p.Asflags = globalFlags(ctx) + p.Target.Android.Cflags = deviceFlags(ctx) + p.Target.Host.Cflags = hostFlags(ctx) + + if ctx.Config().IsEnvTrue("ART_DEX_FILE_ACCESS_TRACKING") { + p.Cflags = append(p.Cflags, "-DART_DEX_FILE_ACCESS_TRACKING") + p.Sanitize.Recover = []string{ + "address", + } + } + + ctx.AppendProperties(p) +} + +func debugDefaults(ctx android.LoadHookContext) { + type props struct { + Cflags []string + } + + p := &props{} + p.Cflags = debugFlags(ctx) + ctx.AppendProperties(p) +} + +func customLinker(ctx android.LoadHookContext) { + linker := ctx.Config().Getenv("CUSTOM_TARGET_LINKER") + type props struct { + DynamicLinker string + } + + p := &props{} + if linker != "" { + p.DynamicLinker = linker + } + + ctx.AppendProperties(p) +} + +func prefer32Bit(ctx android.LoadHookContext) { + type props struct { + Target struct { + Host struct { + Compile_multilib *string + } + } + } + + p := &props{} + if ctx.Config().IsEnvTrue("HOST_PREFER_32_BIT") { + p.Target.Host.Compile_multilib = proptools.StringPtr("prefer32") + } + + // Prepend to make it overridable in the blueprints. Note that it doesn't work + // to override the property in a cc_defaults module. 
+ ctx.PrependProperties(p) +} + +var testMapKey = android.NewOnceKey("artTests") + +func testMap(config android.Config) map[string][]string { + return config.Once(testMapKey, func() interface{} { + return make(map[string][]string) + }).(map[string][]string) +} + +func testInstall(ctx android.InstallHookContext) { + testMap := testMap(ctx.Config()) + + var name string + if ctx.Host() { + name = "host_" + } else { + name = "device_" + } + name += ctx.Arch().ArchType.String() + "_" + ctx.ModuleName() + + artTestMutex.Lock() + defer artTestMutex.Unlock() + + tests := testMap[name] + tests = append(tests, ctx.Path().ToMakePath().String()) + testMap[name] = tests +} + +var artTestMutex sync.Mutex + +func init() { + artModuleTypes := []string{ + "art_cc_library", + "art_cc_library_static", + "art_cc_binary", + "art_cc_test", + "art_cc_test_library", + "art_cc_defaults", + "libart_cc_defaults", + "libart_static_cc_defaults", + "art_global_defaults", + "art_debug_defaults", + "art_apex_test_host", + } + android.AddNeverAllowRules( + android.NeverAllow(). + NotIn("art", "external/vixl"). + ModuleType(artModuleTypes...)) + + android.RegisterModuleType("art_cc_library", artLibrary) + android.RegisterModuleType("art_cc_library_static", artStaticLibrary) + android.RegisterModuleType("art_cc_binary", artBinary) + android.RegisterModuleType("art_cc_test", artTest) + android.RegisterModuleType("art_cc_test_library", artTestLibrary) + android.RegisterModuleType("art_cc_defaults", artDefaultsFactory) + android.RegisterModuleType("libart_cc_defaults", libartDefaultsFactory) + android.RegisterModuleType("libart_static_cc_defaults", libartStaticDefaultsFactory) + android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory) + android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory) + + // ART apex is special because it must include dexpreopt files for bootclasspath jars. 
+ android.RegisterModuleType("art_apex", artApexBundleFactory) + android.RegisterModuleType("art_apex_test", artTestApexBundleFactory) + + // TODO: This makes the module disable itself for host if HOST_PREFER_32_BIT is + // set. We need this because the multilib types of binaries listed in the apex + // rule must match the declared type. This is normally not difficult but HOST_PREFER_32_BIT + // changes this to 'prefer32' on all host binaries. Since HOST_PREFER_32_BIT is + // only used for testing we can just disable the module. + // See b/120617876 for more information. + android.RegisterModuleType("art_apex_test_host", artHostTestApexBundleFactory) +} + +func artApexBundleFactory() android.Module { + return apex.ApexBundleFactory(false /*testApex*/, true /*artApex*/) +} + +func artTestApexBundleFactory() android.Module { + return apex.ApexBundleFactory(true /*testApex*/, true /*artApex*/) +} + +func artHostTestApexBundleFactory() android.Module { + module := apex.ApexBundleFactory(true /*testApex*/, true /*artApex*/) + android.AddLoadHook(module, func(ctx android.LoadHookContext) { + if ctx.Config().IsEnvTrue("HOST_PREFER_32_BIT") { + type props struct { + Target struct { + Host struct { + Enabled *bool + } + } + } + + p := &props{} + p.Target.Host.Enabled = proptools.BoolPtr(false) + ctx.AppendProperties(p) + log.Print("Disabling host build of " + ctx.ModuleName() + " for HOST_PREFER_32_BIT=true") + } + }) + + return module +} + +func artGlobalDefaultsFactory() android.Module { + module := artDefaultsFactory() + android.AddLoadHook(module, globalDefaults) + + return module +} + +func artDebugDefaultsFactory() android.Module { + module := artDefaultsFactory() + android.AddLoadHook(module, debugDefaults) + + return module +} + +func artDefaultsFactory() android.Module { + c := &codegenProperties{} + module := cc.DefaultsFactory(c) + android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, staticAndSharedLibrary) }) + + return module +} + 
+func libartDefaultsFactory() android.Module { + c := &codegenProperties{} + module := cc.DefaultsFactory(c) + android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, staticAndSharedLibrary) }) + + return module +} + +func libartStaticDefaultsFactory() android.Module { + c := &codegenProperties{} + module := cc.DefaultsFactory(c) + android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, staticLibrary) }) + + return module +} + +func artLibrary() android.Module { + module := cc.LibraryFactory() + + installCodegenCustomizer(module, staticAndSharedLibrary) + + return module +} + +func artStaticLibrary() android.Module { + module := cc.LibraryStaticFactory() + + installCodegenCustomizer(module, staticLibrary) + + return module +} + +func artBinary() android.Module { + module := cc.BinaryFactory() + + android.AddLoadHook(module, customLinker) + android.AddLoadHook(module, prefer32Bit) + return module +} + +func artTest() android.Module { + module := cc.TestFactory() + + installCodegenCustomizer(module, binary) + + android.AddLoadHook(module, customLinker) + android.AddLoadHook(module, prefer32Bit) + android.AddInstallHook(module, testInstall) + return module +} + +func artTestLibrary() android.Module { + module := cc.TestLibraryFactory() + + installCodegenCustomizer(module, staticAndSharedLibrary) + + android.AddLoadHook(module, prefer32Bit) + android.AddInstallHook(module, testInstall) + return module +} diff --git a/build/codegen.go b/build/codegen.go new file mode 100644 index 0000000..bc7dc42 --- /dev/null +++ b/build/codegen.go @@ -0,0 +1,218 @@ +// Copyright (C) 2016 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package art + +// This file implements the "codegen" property to apply different properties based on the currently +// selected codegen arches, which defaults to all arches on the host and the primary and secondary +// arches on the device. + +import ( + "sort" + "strings" + + "android/soong/android" +) + +type moduleType struct { + library bool + static bool + shared bool +} + +var ( + staticLibrary = moduleType{true, true, false} + sharedLibrary = moduleType{true, false, true} + staticAndSharedLibrary = moduleType{true, true, true} + binary = moduleType{false, false, false} +) + +func codegen(ctx android.LoadHookContext, c *codegenProperties, t moduleType) { + var hostArches, deviceArches []string + + e := ctx.Config().Getenv("ART_HOST_CODEGEN_ARCHS") + if e == "" { + hostArches = supportedArches + } else { + hostArches = strings.Split(e, " ") + } + + e = ctx.Config().Getenv("ART_TARGET_CODEGEN_ARCHS") + if e == "" { + deviceArches = defaultDeviceCodegenArches(ctx) + } else { + deviceArches = strings.Split(e, " ") + } + + getCodegenArchProperties := func(archName string) *codegenArchProperties { + var arch *codegenArchProperties + switch archName { + case "arm": + arch = &c.Codegen.Arm + case "arm64": + arch = &c.Codegen.Arm64 + case "x86": + arch = &c.Codegen.X86 + case "x86_64": + arch = &c.Codegen.X86_64 + default: + ctx.ModuleErrorf("Unknown codegen architecture %q", archName) + } + return arch + } + + appendCodegenSourceArchProperties := func(p *CodegenSourceArchProperties, archName string) { + arch := getCodegenArchProperties(archName) + 
p.Srcs = append(p.Srcs, arch.CodegenSourceArchProperties.Srcs...) + } + + addCodegenSourceArchProperties := func(host bool, p *CodegenSourceArchProperties) { + type sourceProps struct { + Target struct { + Android *CodegenSourceArchProperties + Host *CodegenSourceArchProperties + } + } + + sp := &sourceProps{} + if host { + sp.Target.Host = p + } else { + sp.Target.Android = p + } + ctx.AppendProperties(sp) + } + + addCodegenArchProperties := func(host bool, archName string) { + type commonProps struct { + Target struct { + Android *CodegenCommonArchProperties + Host *CodegenCommonArchProperties + } + } + + type sharedLibraryProps struct { + Target struct { + Android *CodegenLibraryArchSharedProperties + Host *CodegenLibraryArchSharedProperties + } + } + + type staticLibraryProps struct { + Target struct { + Android *CodegenLibraryArchStaticProperties + Host *CodegenLibraryArchStaticProperties + } + } + + arch := getCodegenArchProperties(archName) + + cp := &commonProps{} + sharedLP := &sharedLibraryProps{} + staticLP := &staticLibraryProps{} + if host { + cp.Target.Host = &arch.CodegenCommonArchProperties + sharedLP.Target.Host = &arch.CodegenLibraryArchSharedProperties + staticLP.Target.Host = &arch.CodegenLibraryArchStaticProperties + } else { + cp.Target.Android = &arch.CodegenCommonArchProperties + sharedLP.Target.Android = &arch.CodegenLibraryArchSharedProperties + staticLP.Target.Android = &arch.CodegenLibraryArchStaticProperties + } + + ctx.AppendProperties(cp) + if t.library { + if t.static { + ctx.AppendProperties(staticLP) + } + if t.shared { + ctx.AppendProperties(sharedLP) + } + } + } + + addCodegenProperties := func(host bool, arches []string) { + sourceProps := &CodegenSourceArchProperties{} + for _, arch := range arches { + appendCodegenSourceArchProperties(sourceProps, arch) + addCodegenArchProperties(host, arch) + } + sourceProps.Srcs = android.FirstUniqueStrings(sourceProps.Srcs) + addCodegenSourceArchProperties(host, sourceProps) + } + + 
addCodegenProperties(false /* host */, deviceArches) + addCodegenProperties(true /* host */, hostArches) +} + +// These properties are allowed to contain the same source file name in different architectures. +// They we will be deduplicated automatically. +type CodegenSourceArchProperties struct { + Srcs []string +} + +type CodegenCommonArchProperties struct { + Cflags []string + Cppflags []string +} + +type CodegenLibraryArchStaticProperties struct { + Static struct { + Whole_static_libs []string + } +} +type CodegenLibraryArchSharedProperties struct { + Shared struct { + Shared_libs []string + Export_shared_lib_headers []string + } +} + +type codegenArchProperties struct { + CodegenSourceArchProperties + CodegenCommonArchProperties + CodegenLibraryArchStaticProperties + CodegenLibraryArchSharedProperties +} + +type codegenProperties struct { + Codegen struct { + Arm, Arm64, X86, X86_64 codegenArchProperties + } +} + +func defaultDeviceCodegenArches(ctx android.LoadHookContext) []string { + arches := make(map[string]bool) + for _, a := range ctx.DeviceConfig().Arches() { + s := a.ArchType.String() + arches[s] = true + if s == "arm64" { + arches["arm"] = true + } else if s == "x86_64" { + arches["x86"] = true + } + } + ret := make([]string, 0, len(arches)) + for a := range arches { + ret = append(ret, a) + } + sort.Strings(ret) + return ret +} + +func installCodegenCustomizer(module android.Module, t moduleType) { + c := &codegenProperties{} + android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, t) }) + module.AddProperties(c) +} diff --git a/build/makevars.go b/build/makevars.go new file mode 100644 index 0000000..1faa0f6 --- /dev/null +++ b/build/makevars.go @@ -0,0 +1,47 @@ +// Copyright (C) 2016 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package art + +import ( + "sort" + "strings" + + "android/soong/android" +) + +var ( + pctx = android.NewPackageContext("android/soong/art") +) + +func init() { + android.RegisterMakeVarsProvider(pctx, makeVarsProvider) +} + +func makeVarsProvider(ctx android.MakeVarsContext) { + ctx.Strict("LIBART_IMG_HOST_BASE_ADDRESS", ctx.Config().LibartImgHostBaseAddress()) + ctx.Strict("LIBART_IMG_TARGET_BASE_ADDRESS", ctx.Config().LibartImgDeviceBaseAddress()) + + testMap := testMap(ctx.Config()) + var testNames []string + for name := range testMap { + testNames = append(testNames, name) + } + + sort.Strings(testNames) + + for _, name := range testNames { + ctx.Strict("ART_TEST_LIST_"+name, strings.Join(testMap[name], " ")) + } +} diff --git a/build/sdk/Android.bp b/build/sdk/Android.bp new file mode 100644 index 0000000..ed9a4fe --- /dev/null +++ b/build/sdk/Android.bp @@ -0,0 +1,53 @@ +// Copyright (C) 2020 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The SDK for the art module apex. 
+sdk { + name: "art-module-sdk", + java_system_modules: [ + "art-module-public-api-stubs-system-modules", + "art-module-intra-core-api-stubs-system-modules", + "art-module-platform-api-stubs-system-modules", + ], + native_static_libs: [ + "libartimagevalues", + ], +} + +// Exported host tools and libraries. +module_exports { + name: "art-module-host-exports", + host_supported: true, + device_supported: false, + java_libs: [ + "timezone-host", + ], +} + +// Exported tests and supporting libraries +module_exports { + name: "art-module-test-exports", + java_libs: [ + "core-compat-test-rules", + "core-test-rules", + "core-tests-support", + "okhttp-tests-nojarjar", + ], + java_tests: [ + "libcore-crypto-tests", + ], + native_shared_libs: [ + "libjavacoretests", + ], +} diff --git a/cmdline/Android.bp b/cmdline/Android.bp new file mode 100644 index 0000000..3eac0ed --- /dev/null +++ b/cmdline/Android.bp @@ -0,0 +1,35 @@ +// +// Copyright (C) 2016 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// TODO: this header library depends on libart. Find a way to express that. 
+cc_library_headers { + name: "art_cmdlineparser_headers", + host_supported: true, + export_include_dirs: ["."], + + apex_available: [ + "com.android.art.debug", + "com.android.art.release", + ], +} + +art_cc_test { + name: "art_cmdline_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["cmdline_parser_test.cc"], +} diff --git a/cmdline/README.md b/cmdline/README.md new file mode 100644 index 0000000..8cac77f --- /dev/null +++ b/cmdline/README.md @@ -0,0 +1,245 @@ +Cmdline +=================== + +Introduction +------------- +This directory contains the classes that do common command line tool initialization and parsing. The +long term goal is eventually for all `art` command-line tools to be using these helpers. + +---------- + + +## Cmdline Parser +------------- + +The `CmdlineParser` class provides a fluent interface using a domain-specific language to quickly +generate a type-safe value parser that process a user-provided list of strings (`argv`). Currently, +it can parse a string into a `VariantMap`, although in the future it might be desirable to parse +into any struct of any field. + +To use, create a `CmdlineParser::Builder` and then chain the `Define` methods together with +`WithType` and `IntoXX` methods. + +### Quick Start +For example, to save the values into a user-defined variant map: + +``` +struct FruitVariantMap : VariantMap { + static const Key Apple; + static const Key Orange; + static const Key Help; +}; +// Note that some template boilerplate has been avoided for clarity. +// See variant_map_test.cc for how to completely define a custom map. + +using FruitParser = CmdlineParser; + +FruitParser MakeParser() { + auto&& builder = FruitParser::Builder(); + builder. 
+ .Define("--help") + .IntoKey(FruitVariantMap::Help) + Define("--apple:_") + .WithType() + .IntoKey(FruitVariantMap::Apple) + .Define("--orange:_") + .WithType() + .WithRange(0.0, 1.0) + .IntoKey(FruitVariantMap::Orange); + + return builder.Build(); +} + +int main(char** argv, int argc) { + auto parser = MakeParser(); + auto result = parser.parse(argv, argc)); + if (result.isError()) { + std::cerr << result.getMessage() << std::endl; + return EXIT_FAILURE; + } + auto map = parser.GetArgumentsMap(); + std::cout << "Help? " << map.GetOrDefault(FruitVariantMap::Help) << std::endl; + std::cout << "Apple? " << map.GetOrDefault(FruitVariantMap::Apple) << std::endl; + std::cout << "Orange? " << map.GetOrDefault(FruitVariantMap::Orange) << std::endl; + + return EXIT_SUCCESS; +} +``` + +In the above code sample, we define a parser which is capable of parsing something like `--help +--apple:123 --orange:0.456` . It will error out automatically if invalid flags are given, or if the +appropriate flags are given but of the the wrong type/range. So for example, `--foo` will not parse +(invalid argument), neither will `--apple:fruit` (fruit is not an int) nor `--orange:1234` (1234 is +out of range of [0.0, 1.0]) + +### Argument Definitions in Detail +#### Define method +The 'Define' method takes one or more aliases for the argument. Common examples might be `{"-h", +"--help"}` where both `--help` and `-h` are aliases for the same argument. + +The simplest kind of argument just tests for presence, but we often want to parse out a particular +type of value (such as an int or double as in the above `FruitVariantMap` example). To do that, a +_wildcard_ must be used to denote the location within the token that the type will be parsed out of. + +For example with `-orange:_` the parse would know to check all tokens in an `argv` list for the +`-orange:` prefix and then strip it, leaving only the remains to be parsed. 
+ +#### WithType method (optional) +After an argument definition is provided, the parser builder needs to know what type the argument +will be in order to provide the type safety and make sure the rest of the argument definition is +correct as early as possible (in essence, everything but the parsing of the argument name is done at +compile time). + +Everything that follows a `WithType()` call is thus type checked to only take `T` values. + +If this call is omitted, the parser generator assumes you are building a `Unit` type (i.e. an +argument that only cares about presence). + +#### WithRange method (optional) +Some values will not make sense outside of a `[min, max]` range, so this is an option to quickly add +a range check without writing custom code. The range check is performed after the main parsing +happens and happens for any type implementing the `<=` operators. + +#### WithValueMap (optional) +When parsing an enumeration, it might be very convenient to map a list of possible argument string +values into its runtime value. + +With something like +``` + .Define("-hello:_") + .WithValueMap({"world", kWorld}, + {"galaxy", kGalaxy}) +``` +It will parse either `-hello:world` or `-hello:galaxy` only (and error out on other variations of +`-hello:whatever`), converting it to the type-safe value of `kWorld` or `kGalaxy` respectively. + +This is meant to be another shorthand (like `WithRange`) to avoid writing a custom type parser. In +general it takes a variadic number of `pair`. + +#### WithValues (optional) +When an argument definition has multiple aliases with no wildcards, it might be convenient to +quickly map them into discrete values. + +For example: +``` + .Define({"-xinterpret", "-xnointerpret"}) + .WithValues({true, false} +``` +It will parse `-xinterpret` as `true` and `-xnointerpret` as `false`. + +In general, it uses the position of the argument alias to map into the WithValues position value. 
+ +(Note that this method will not work when the argument definitions have a wildcard because there is +no way to position-ally match that). + +#### AppendValues (optional) +By default, the argument is assumed to appear exactly once, and if the user specifies it more than +once, only the latest value is taken into account (and all previous occurrences of the argument are +ignored). + +In some situations, we may want to accumulate the argument values instead of discarding the previous +ones. + +For example +``` + .Define("-D") + .WithType)() + .AppendValues() +``` +Will parse something like `-Dhello -Dworld -Dbar -Dbaz` into `std::vector{"hello", +"world", "bar", "baz"}`. + +### Setting an argument parse target (required) +To complete an argument definition, the parser generator also needs to know where to save values. +Currently, only `IntoKey` is supported, but that may change in the future. + +#### IntoKey (required) +This specifies that when a value is parsed, it will get saved into a variant map using the specific +key. + +For example, +``` + .Define("-help") + .IntoKey(Map::Help) +``` +will save occurrences of the `-help` argument by doing a `Map.Set(Map::Help, ParsedValue("-help"))` +where `ParsedValue` is an imaginary function that parses the `-help` argment into a specific type +set by `WithType`. + +### Ignoring unknown arguments +This is highly discouraged, but for compatibility with `JNI` which allows argument ignores, there is +an option to ignore any argument tokens that are not known to the parser. This is done with the +`Ignore` function which takes a list of argument definition names. + +It's semantically equivalent to making a series of argument definitions that map to `Unit` but don't +get saved anywhere. Values will still get parsed as normal, so it will *not* ignore known arguments +with invalid values, only user-arguments for which it could not find a matching argument definition. 
+ +### Parsing custom types +Any type can be parsed from a string by specializing the `CmdlineType` class and implementing the +static interface provided by `CmdlineTypeParser`. It is recommended to inherit from +`CmdlineTypeParser` since it already provides default implementations for every method. + +The `Parse` method should be implemented for most types. Some types will allow appending (such as an +`std::vector` and are meant to be used with `AppendValues` in which case the +`ParseAndAppend` function should be implemented. + +For example: +``` +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + char* end = nullptr; + errno = 0; + double value = strtod(str.c_str(), &end); + + if (*end != '\0') { + return Result::Failure("Failed to parse double from " + str); + } + if (errno == ERANGE) { + return Result::OutOfRange( + "Failed to parse double from " + str + "; overflow/underflow occurred"); + } + + return Result::Success(value); + } + + static const char* Name() { return "double"; } + // note: Name() is just here for more user-friendly errors, + // but in the future we will use non-standard ways of getting the type name + // at compile-time and this will no longer be required +}; +``` +Will parse any non-append argument definitions with a type of `double`. + +For an appending example: +``` +template <> +struct CmdlineType> : CmdlineTypeParser> { + Result ParseAndAppend(const std::string& args, + std::vector& existing_value) { + existing_value.push_back(args); + return Result::SuccessNoValue(); + } + static const char* Name() { return "std::vector"; } +}; +``` +Will parse multiple instances of the same argument repeatedly into the `existing_value` (which will +be default-constructed to `T{}` for the first occurrence of the argument). + +#### What is a `Result`? +`Result` is a typedef for `CmdlineParseResult` and it acts similar to a poor version of +`Either` in Haskell. 
In particular, it would be similar to `Either< int ErrorCode, +Maybe >`. + +There are helpers like `Result::Success(value)`, `Result::Failure(string message)` and so on to +quickly construct these without caring about the type. + +When successfully parsing a single value, `Result::Success(value)` should be used, and when +successfully parsing an appended value, use `Result::SuccessNoValue()` and write back the new value +into `existing_value` as an out-parameter. + +When many arguments are parsed, the result is collapsed down to a `CmdlineResult` which acts as a +`Either` where the right side simply indicates success. When values are +successfully stored, the parser will automatically save it into the target destination as a side +effect. diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h new file mode 100644 index 0000000..5821496 --- /dev/null +++ b/cmdline/cmdline.h @@ -0,0 +1,422 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_CMDLINE_CMDLINE_H_ +#define ART_CMDLINE_CMDLINE_H_ + +#include +#include + +#include +#include +#include +#include + +#include "android-base/stringprintf.h" + +#include "base/file_utils.h" +#include "base/logging.h" +#include "base/mutex.h" +#include "base/string_view_cpp20.h" +#include "noop_compiler_callbacks.h" +#include "runtime.h" + +#if !defined(NDEBUG) +#define DBG_LOG LOG(INFO) +#else +#define DBG_LOG LOG(DEBUG) +#endif + +namespace art { + +// TODO: Move to and remove all copies of this function. +static bool LocationToFilename(const std::string& location, InstructionSet isa, + std::string* filename) { + bool has_system = false; + bool has_cache = false; + // image_location = /system/framework/boot.art + // system_image_filename = /system/framework//boot.art + std::string system_filename(GetSystemImageFilename(location.c_str(), isa)); + if (OS::FileExists(system_filename.c_str())) { + has_system = true; + } + + bool have_android_data = false; + bool dalvik_cache_exists = false; + bool is_global_cache = false; + std::string dalvik_cache; + GetDalvikCache(GetInstructionSetString(isa), false, &dalvik_cache, + &have_android_data, &dalvik_cache_exists, &is_global_cache); + + std::string cache_filename; + if (have_android_data && dalvik_cache_exists) { + // Always set output location even if it does not exist, + // so that the caller knows where to create the image. 
+ // + // image_location = /system/framework/boot.art + // *image_filename = /data/dalvik-cache//boot.art + std::string error_msg; + if (GetDalvikCacheFilename(location.c_str(), dalvik_cache.c_str(), + &cache_filename, &error_msg)) { + has_cache = true; + } + } + if (has_system) { + *filename = system_filename; + return true; + } else if (has_cache) { + *filename = cache_filename; + return true; + } else { + *filename = system_filename; + return false; + } +} + +static Runtime* StartRuntime(const char* boot_image_location, + InstructionSet instruction_set, + const std::vector& runtime_args) { + CHECK(boot_image_location != nullptr); + + RuntimeOptions options; + + // We are more like a compiler than a run-time. We don't want to execute code. + { + static NoopCompilerCallbacks callbacks; + options.push_back(std::make_pair("compilercallbacks", &callbacks)); + } + + // Boot image location. + { + std::string boot_image_option; + boot_image_option += "-Ximage:"; + boot_image_option += boot_image_location; + options.push_back(std::make_pair(boot_image_option, nullptr)); + } + + // Instruction set. + options.push_back( + std::make_pair("imageinstructionset", + reinterpret_cast(GetInstructionSetString(instruction_set)))); + + // Explicit runtime args. + for (const char* runtime_arg : runtime_args) { + options.push_back(std::make_pair(runtime_arg, nullptr)); + } + + // None of the command line tools need sig chain. If this changes we'll need + // to upgrade this option to a proper parameter. + options.push_back(std::make_pair("-Xno-sig-chain", nullptr)); + if (!Runtime::Create(options, false)) { + fprintf(stderr, "Failed to create runtime\n"); + return nullptr; + } + + // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, + // give it away now and then switch to a more manageable ScopedObjectAccess. 
+ Thread::Current()->TransitionFromRunnableToSuspended(kNative); + + return Runtime::Current(); +} + +struct CmdlineArgs { + enum ParseStatus { + kParseOk, // Parse successful. Do not set the error message. + kParseUnknownArgument, // Unknown argument. Do not set the error message. + kParseError, // Parse ok, but failed elsewhere. Print the set error message. + }; + + bool Parse(int argc, char** argv) { + // Skip over argv[0]. + argv++; + argc--; + + if (argc == 0) { + fprintf(stderr, "No arguments specified\n"); + PrintUsage(); + return false; + } + + std::string error_msg; + for (int i = 0; i < argc; i++) { + const char* const raw_option = argv[i]; + const std::string_view option(raw_option); + if (StartsWith(option, "--boot-image=")) { + boot_image_location_ = raw_option + strlen("--boot-image="); + } else if (StartsWith(option, "--instruction-set=")) { + const char* const instruction_set_str = raw_option + strlen("--instruction-set="); + instruction_set_ = GetInstructionSetFromString(instruction_set_str); + if (instruction_set_ == InstructionSet::kNone) { + fprintf(stderr, "Unsupported instruction set %s\n", instruction_set_str); + PrintUsage(); + return false; + } + } else if (option == "--runtime-arg") { + if (i + 1 == argc) { + fprintf(stderr, "Missing argument for --runtime-arg\n"); + PrintUsage(); + return false; + } + ++i; + runtime_args_.push_back(argv[i]); + } else if (StartsWith(option, "--output=")) { + output_name_ = std::string(option.substr(strlen("--output="))); + const char* filename = output_name_.c_str(); + out_.reset(new std::ofstream(filename)); + if (!out_->good()) { + fprintf(stderr, "Failed to open output filename %s\n", filename); + PrintUsage(); + return false; + } + os_ = out_.get(); + } else { + ParseStatus parse_status = ParseCustom(raw_option, option.length(), &error_msg); + + if (parse_status == kParseUnknownArgument) { + fprintf(stderr, "Unknown argument %s\n", option.data()); + } + + if (parse_status != kParseOk) { + 
fprintf(stderr, "%s\n", error_msg.c_str()); + PrintUsage(); + return false; + } + } + } + + DBG_LOG << "will call parse checks"; + + { + ParseStatus checks_status = ParseChecks(&error_msg); + if (checks_status != kParseOk) { + fprintf(stderr, "%s\n", error_msg.c_str()); + PrintUsage(); + return false; + } + } + + return true; + } + + virtual std::string GetUsage() const { + std::string usage; + + usage += // Required. + " --boot-image=: provide the image location for the boot class path.\n" + " Do not include the arch as part of the name, it is added automatically.\n" + " Example: --boot-image=/system/framework/boot.art\n" + " (specifies /system/framework//boot.art as the image file)\n" + "\n"; + usage += android::base::StringPrintf( // Optional. + " --instruction-set=(arm|arm64|x86|x86_64): for locating the image\n" + " file based on the image location set.\n" + " Example: --instruction-set=x86\n" + " Default: %s\n" + "\n", + GetInstructionSetString(kRuntimeISA)); + usage += + " --runtime-arg used to specify various arguments for the runtime\n" + " such as initial heap size, maximum heap size, and verbose output.\n" + " Use a separate --runtime-arg switch for each argument.\n" + " Example: --runtime-arg -Xms256m\n" + "\n"; + usage += // Optional. + " --output= may be used to send the output to a file.\n" + " Example: --output=/tmp/oatdump.txt\n" + "\n"; + + return usage; + } + + // Specified by --boot-image. + const char* boot_image_location_ = nullptr; + // Specified by --instruction-set. + InstructionSet instruction_set_ = InstructionSet::kNone; + // Runtime arguments specified by --runtime-arg. + std::vector runtime_args_; + // Specified by --output. 
+ std::ostream* os_ = &std::cout; + std::unique_ptr out_; // If something besides cout is used + std::string output_name_; + + virtual ~CmdlineArgs() {} + + bool ParseCheckBootImage(std::string* error_msg) { + if (boot_image_location_ == nullptr) { + *error_msg = "--boot-image must be specified"; + return false; + } + if (instruction_set_ == InstructionSet::kNone) { + LOG(WARNING) << "No instruction set given, assuming " << GetInstructionSetString(kRuntimeISA); + instruction_set_ = kRuntimeISA; + } + + DBG_LOG << "boot image location: " << boot_image_location_; + + // Checks for --boot-image location. + { + std::string boot_image_location = boot_image_location_; + size_t separator_pos = boot_image_location.find(':'); + if (separator_pos != std::string::npos) { + boot_image_location = boot_image_location.substr(/*pos*/ 0u, /*size*/ separator_pos); + } + size_t file_name_idx = boot_image_location.rfind('/'); + if (file_name_idx == std::string::npos) { // Prevent a InsertIsaDirectory check failure. + *error_msg = "Boot image location must have a / in it"; + return false; + } + + // Don't let image locations with the 'arch' in it through, since it's not a location. + // This prevents a common error "Could not create an image space..." when initing the Runtime. + if (file_name_idx != std::string::npos) { + std::string no_file_name = boot_image_location.substr(0, file_name_idx); + size_t ancestor_dirs_idx = no_file_name.rfind('/'); + + std::string parent_dir_name; + if (ancestor_dirs_idx != std::string::npos) { + parent_dir_name = no_file_name.substr(ancestor_dirs_idx + 1); + } else { + parent_dir_name = no_file_name; + } + + DBG_LOG << "boot_image_location parent_dir_name was " << parent_dir_name; + + if (GetInstructionSetFromString(parent_dir_name.c_str()) != InstructionSet::kNone) { + *error_msg = "Do not specify the architecture as part of the boot image location"; + return false; + } + } + + // Check that the boot image location points to a valid file name. 
+ std::string file_name; + if (!LocationToFilename(boot_image_location, instruction_set_, &file_name)) { + *error_msg = android::base::StringPrintf( + "No corresponding file for location '%s' (filename '%s') exists", + boot_image_location.c_str(), + file_name.c_str()); + return false; + } + + DBG_LOG << "boot_image_filename does exist: " << file_name; + } + + return true; + } + + void PrintUsage() { + fprintf(stderr, "%s", GetUsage().c_str()); + } + + protected: + virtual ParseStatus ParseCustom(const char* raw_option ATTRIBUTE_UNUSED, + size_t raw_option_length ATTRIBUTE_UNUSED, + std::string* error_msg ATTRIBUTE_UNUSED) { + return kParseUnknownArgument; + } + + virtual ParseStatus ParseChecks(std::string* error_msg ATTRIBUTE_UNUSED) { + return kParseOk; + } +}; + +template +struct CmdlineMain { + int Main(int argc, char** argv) { + Locks::Init(); + InitLogging(argv, Runtime::Abort); + std::unique_ptr args = std::unique_ptr(CreateArguments()); + args_ = args.get(); + + DBG_LOG << "Try to parse"; + + if (args_ == nullptr || !args_->Parse(argc, argv)) { + return EXIT_FAILURE; + } + + bool needs_runtime = NeedsRuntime(); + std::unique_ptr runtime; + + + if (needs_runtime) { + std::string error_msg; + if (!args_->ParseCheckBootImage(&error_msg)) { + fprintf(stderr, "%s\n", error_msg.c_str()); + args_->PrintUsage(); + return EXIT_FAILURE; + } + runtime.reset(CreateRuntime(args.get())); + if (runtime == nullptr) { + return EXIT_FAILURE; + } + if (!ExecuteWithRuntime(runtime.get())) { + return EXIT_FAILURE; + } + } else { + if (!ExecuteWithoutRuntime()) { + return EXIT_FAILURE; + } + } + + if (!ExecuteCommon()) { + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; + } + + // Override this function to create your own arguments. + // Usually will want to return a subtype of CmdlineArgs. + virtual Args* CreateArguments() { + return new Args(); + } + + // Override this function to do something else with the runtime. 
+ virtual bool ExecuteWithRuntime(Runtime* runtime) { + CHECK(runtime != nullptr); + // Do nothing + return true; + } + + // Does the code execution need a runtime? Sometimes it doesn't. + virtual bool NeedsRuntime() { + return true; + } + + // Do execution without having created a runtime. + virtual bool ExecuteWithoutRuntime() { + return true; + } + + // Continue execution after ExecuteWith[out]Runtime + virtual bool ExecuteCommon() { + return true; + } + + virtual ~CmdlineMain() {} + + protected: + Args* args_ = nullptr; + + private: + Runtime* CreateRuntime(CmdlineArgs* args) { + CHECK(args != nullptr); + + return StartRuntime(args->boot_image_location_, args->instruction_set_, args_->runtime_args_); + } +}; +} // namespace art + +#endif // ART_CMDLINE_CMDLINE_H_ diff --git a/cmdline/cmdline_parse_result.h b/cmdline/cmdline_parse_result.h new file mode 100644 index 0000000..982f178 --- /dev/null +++ b/cmdline/cmdline_parse_result.h @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_CMDLINE_PARSE_RESULT_H_ +#define ART_CMDLINE_CMDLINE_PARSE_RESULT_H_ + +#include "cmdline_result.h" +#include "detail/cmdline_parser_detail.h" + +namespace art { +// Result of a type-parsing attempt. If successful holds the strongly-typed value, +// otherwise it holds either a usage or a failure string message that should be displayed back +// to the user. 
+// +// CmdlineType::Parse/CmdlineType::ParseAndAppend must return this type. +template +struct CmdlineParseResult : CmdlineResult { + using CmdlineResult::CmdlineResult; + + // Create an error result with the usage error code and the specified message. + static CmdlineParseResult Usage(const std::string& message) { + return CmdlineParseResult(kUsage, message); + } + + // Create an error result with the failure error code and no message. + static CmdlineParseResult Failure() { + return CmdlineParseResult(kFailure); + } + + // Create an error result with the failure error code and no message. + static CmdlineParseResult Failure(const std::string& message) { + return CmdlineParseResult(kFailure, message); + } + + // Create a successful result which holds the specified value. + static CmdlineParseResult Success(const T& value) { + return CmdlineParseResult(value); + } + + // Create a successful result, taking over the value. + static CmdlineParseResult Success(T&& value) { + return CmdlineParseResult(std::forward(value)); + } + + // Create succesful result, without any values. Used when a value was successfully appended + // into an existing object. + static CmdlineParseResult SuccessNoValue() { + return CmdlineParseResult(T {}); + } + + // Create an error result with the OutOfRange error and the specified message. + static CmdlineParseResult OutOfRange(const std::string& message) { + return CmdlineParseResult(kOutOfRange, message); + } + + // Create an error result with the OutOfRange code and a custom message + // which is printed from the actual/min/max values. + // Values are converted to string using the ostream<< operator. + static CmdlineParseResult OutOfRange(const T& value, + const T& min, + const T& max) { + return CmdlineParseResult(kOutOfRange, + "actual: " + art::detail::ToStringAny(value) + + ", min: " + art::detail::ToStringAny(min) + + ", max: " + art::detail::ToStringAny(max)); + } + + // Get a read-only reference to the underlying value. 
+ // The result must have been successful and must have a value. + const T& GetValue() const { + assert(IsSuccess()); + assert(has_value_); + return value_; + } + + // Get a mutable reference to the underlying value. + // The result must have been successful and must have a value. + T& GetValue() { + assert(IsSuccess()); + assert(has_value_); + return value_; + } + + // Take over the value. + // The result must have been successful and must have a value. + T&& ReleaseValue() { + assert(IsSuccess()); + assert(has_value_); + return std::move(value_); + } + + // Whether or not the result has a value (e.g. created with Result::Success). + // Error results never have values, success results commonly, but not always, have values. + bool HasValue() const { + return has_value_; + } + + // Cast an error-result from type T2 to T1. + // Safe since error-results don't store a typed value. + template + static CmdlineParseResult CastError(const CmdlineParseResult& other) { + assert(other.IsError()); + return CmdlineParseResult(other.GetStatus()); + } + + // Make sure copying is allowed + CmdlineParseResult(const CmdlineParseResult&) = default; + // Make sure moving is cheap + CmdlineParseResult(CmdlineParseResult&&) = default; + + private: + explicit CmdlineParseResult(const T& value) + : CmdlineResult(kSuccess), value_(value), has_value_(true) {} + explicit CmdlineParseResult(T&& value) + : CmdlineResult(kSuccess), value_(std::forward(value)), has_value_(true) {} + CmdlineParseResult() + : CmdlineResult(kSuccess), value_(), has_value_(false) {} + + T value_; + bool has_value_ = false; +}; + +} // namespace art + +#endif // ART_CMDLINE_CMDLINE_PARSE_RESULT_H_ diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h new file mode 100644 index 0000000..952be44 --- /dev/null +++ b/cmdline/cmdline_parser.h @@ -0,0 +1,633 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_CMDLINE_PARSER_H_ +#define ART_CMDLINE_CMDLINE_PARSER_H_ + +#define CMDLINE_NDEBUG 1 // Do not output any debugging information for parsing. + +#include "detail/cmdline_debug_detail.h" +#include "detail/cmdline_parse_argument_detail.h" +#include "detail/cmdline_parser_detail.h" + +#include "cmdline_parse_result.h" +#include "cmdline_result.h" +#include "cmdline_type_parser.h" +#include "cmdline_types.h" +#include "token_range.h" + +#include "base/variant_map.h" + +#include +#include + +namespace art { +// Build a parser for command line arguments with a small domain specific language. +// Each parsed type must have a specialized CmdlineType in order to do the string->T parsing. +// Each argument must also have a VariantMap::Key in order to do the T storage. +template class TVariantMapKey> +struct CmdlineParser { + template + struct ArgumentBuilder; + + struct Builder; // Build the parser. + struct UntypedArgumentBuilder; // Build arguments which weren't yet given a type. + + private: + // Forward declare some functions that we need to use before fully-defining structs. + template + static ArgumentBuilder CreateArgumentBuilder(Builder& parent); + static void AppendCompletedArgument(Builder& builder, detail::CmdlineParseArgumentAny* arg); + + // Allow argument definitions to save their values when they are parsed, + // without having a dependency on CmdlineParser or any of the builders. 
+ // + // A shared pointer to the save destination is saved into the load/save argument callbacks. + // + // This also allows the underlying storage (i.e. a variant map) to be released + // to the user, without having to recreate all of the callbacks. + struct SaveDestination { + SaveDestination() : variant_map_(new TVariantMap()) {} + + // Save value to the variant map. + template + void SaveToMap(const TVariantMapKey& key, TArg& value) { + variant_map_->Set(key, value); + } + + // Get the existing value from a map, creating the value if it did not already exist. + template + TArg& GetOrCreateFromMap(const TVariantMapKey& key) { + auto* ptr = variant_map_->Get(key); + if (ptr == nullptr) { + variant_map_->Set(key, TArg()); + ptr = variant_map_->Get(key); + assert(ptr != nullptr); + } + + return *ptr; + } + + protected: + // Release the map, clearing it as a side-effect. + // Future saves will be distinct from previous saves. + TVariantMap&& ReleaseMap() { + return std::move(*variant_map_); + } + + // Get a read-only reference to the variant map. + const TVariantMap& GetMap() { + return *variant_map_; + } + + // Clear all potential save targets. + void Clear() { + variant_map_->Clear(); + } + + private: + // Don't try to copy or move this. Just don't. + SaveDestination(const SaveDestination&) = delete; + SaveDestination(SaveDestination&&) = delete; + SaveDestination& operator=(const SaveDestination&) = delete; + SaveDestination& operator=(SaveDestination&&) = delete; + + std::shared_ptr variant_map_; + + // Allow the parser to change the underlying pointers when we release the underlying storage. + friend struct CmdlineParser; + }; + + public: + // Builder for the argument definition of type TArg. Do not use this type directly, + // it is only a separate type to provide compile-time enforcement against doing + // illegal builds. + template + struct ArgumentBuilder { + // Add a range check to this argument. 
+ ArgumentBuilder& WithRange(const TArg& min, const TArg& max) { + argument_info_.has_range_ = true; + argument_info_.min_ = min; + argument_info_.max_ = max; + + return *this; + } + + // Map the list of names into the list of values. List of names must not have + // any wildcards '_' in it. + // + // Do not use if a value map has already been set. + ArgumentBuilder& WithValues(std::initializer_list value_list) { + SetValuesInternal(value_list); + return *this; + } + + // When used with a single alias, map the alias into this value. + // Same as 'WithValues({value})' , but allows the omission of the curly braces {}. + ArgumentBuilder WithValue(const TArg& value) { + return WithValues({ value }); + } + + // Map the parsed string values (from _) onto a concrete value. If no wildcard + // has been specified, then map the value directly from the arg name (i.e. + // if there are multiple aliases, then use the alias to do the mapping). + // + // Do not use if a values list has already been set. + ArgumentBuilder& WithValueMap( + std::initializer_list> key_value_list) { + assert(!argument_info_.has_value_list_); + + argument_info_.has_value_map_ = true; + argument_info_.value_map_ = key_value_list; + + return *this; + } + + // If this argument is seen multiple times, successive arguments mutate the same value + // instead of replacing it with a new value. + ArgumentBuilder& AppendValues() { + argument_info_.appending_values_ = true; + + return *this; + } + + // Convenience type alias for the variant map key type definition. + using MapKey = TVariantMapKey; + + // Write the results of this argument into the key. + // To look up the parsed arguments, get the map and then use this key with VariantMap::Get + CmdlineParser::Builder& IntoKey(const MapKey& key) { + // Only capture save destination as a pointer. + // This allows the parser to later on change the specific save targets. 
+ auto save_destination = save_destination_; + save_value_ = [save_destination, &key](TArg& value) { + save_destination->SaveToMap(key, value); + CMDLINE_DEBUG_LOG << "Saved value into map '" + << detail::ToStringAny(value) << "'" << std::endl; + }; + + load_value_ = [save_destination, &key]() -> TArg& { + TArg& value = save_destination->GetOrCreateFromMap(key); + CMDLINE_DEBUG_LOG << "Loaded value from map '" << detail::ToStringAny(value) << "'" + << std::endl; + + return value; + }; + + save_value_specified_ = true; + load_value_specified_ = true; + + CompleteArgument(); + return parent_; + } + + // Ensure we always move this when returning a new builder. + ArgumentBuilder(ArgumentBuilder&&) = default; + + protected: + // Used by builder to internally ignore arguments by dropping them on the floor after parsing. + CmdlineParser::Builder& IntoIgnore() { + save_value_ = [](TArg& value) { + CMDLINE_DEBUG_LOG << "Ignored value '" << detail::ToStringAny(value) << "'" << std::endl; + }; + load_value_ = []() -> TArg& { + assert(false && "Should not be appending values to ignored arguments"); + __builtin_trap(); // Blow up. + }; + + save_value_specified_ = true; + load_value_specified_ = true; + + CompleteArgument(); + return parent_; + } + + void SetValuesInternal(const std::vector&& value_list) { + assert(!argument_info_.has_value_map_); + + argument_info_.has_value_list_ = true; + argument_info_.value_list_ = value_list; + } + + void SetNames(std::vector&& names) { + argument_info_.names_ = names; + } + + void SetNames(std::initializer_list names) { + argument_info_.names_ = names; + } + + private: + // Copying is bad. Move only. + ArgumentBuilder(const ArgumentBuilder&) = delete; + + // Called by any function that doesn't chain back into this builder. + // Completes the argument builder and save the information into the main builder. + void CompleteArgument() { + assert(save_value_specified_ && + "No Into... 
function called, nowhere to save parsed values to"); + assert(load_value_specified_ && + "No Into... function called, nowhere to load parsed values from"); + + argument_info_.CompleteArgument(); + + // Appending the completed argument is destructive. The object is no longer + // usable since all the useful information got moved out of it. + AppendCompletedArgument(parent_, + new detail::CmdlineParseArgument( + std::move(argument_info_), + std::move(save_value_), + std::move(load_value_))); + } + + friend struct CmdlineParser; + friend struct CmdlineParser::Builder; + friend struct CmdlineParser::UntypedArgumentBuilder; + + ArgumentBuilder(CmdlineParser::Builder& parser, + std::shared_ptr save_destination) + : parent_(parser), + save_value_specified_(false), + load_value_specified_(false), + save_destination_(save_destination) { + save_value_ = [](TArg&) { + assert(false && "No save value function defined"); + }; + + load_value_ = []() -> TArg& { + assert(false && "No load value function defined"); + __builtin_trap(); // Blow up. + }; + } + + CmdlineParser::Builder& parent_; + std::function save_value_; + std::function load_value_; + bool save_value_specified_; + bool load_value_specified_; + detail::CmdlineParserArgumentInfo argument_info_; + + std::shared_ptr save_destination_; + }; + + struct UntypedArgumentBuilder { + // Set a type for this argument. The specific subcommand parser is looked up by the type. + template + ArgumentBuilder WithType() { + return CreateTypedBuilder(); + } + + // When used with multiple aliases, map the position of the alias to the value position. + template + ArgumentBuilder WithValues(std::initializer_list values) { + auto&& a = CreateTypedBuilder(); + a.WithValues(values); + return std::move(a); + } + + // When used with a single alias, map the alias into this value. + // Same as 'WithValues({value})' , but allows the omission of the curly braces {}. 
+ template + ArgumentBuilder WithValue(const TArg& value) { + return WithValues({ value }); + } + + // Set the current building argument to target this key. + // When this command line argument is parsed, it can be fetched with this key. + Builder& IntoKey(const TVariantMapKey& key) { + return CreateTypedBuilder().IntoKey(key); + } + + // Ensure we always move this when returning a new builder. + UntypedArgumentBuilder(UntypedArgumentBuilder&&) = default; + + protected: + void SetNames(std::vector&& names) { + names_ = std::move(names); + } + + void SetNames(std::initializer_list names) { + names_ = names; + } + + private: + // No copying. Move instead. + UntypedArgumentBuilder(const UntypedArgumentBuilder&) = delete; + + template + ArgumentBuilder CreateTypedBuilder() { + auto&& b = CreateArgumentBuilder(parent_); + InitializeTypedBuilder(&b); // Type-specific initialization + b.SetNames(std::move(names_)); + return std::move(b); + } + + template + typename std::enable_if::value>::type + InitializeTypedBuilder(ArgumentBuilder* arg_builder) { + // Every Unit argument implicitly maps to a runtime value of Unit{} + std::vector values(names_.size(), Unit{}); + arg_builder->SetValuesInternal(std::move(values)); + } + + // No extra work for all other types + void InitializeTypedBuilder(void*) {} + + template + friend struct ArgumentBuilder; + friend struct Builder; + + explicit UntypedArgumentBuilder(CmdlineParser::Builder& parent) : parent_(parent) {} + // UntypedArgumentBuilder(UntypedArgumentBuilder&& other) = default; + + CmdlineParser::Builder& parent_; + std::vector names_; + }; + + // Build a new parser given a chain of calls to define arguments. + struct Builder { + Builder() : save_destination_(new SaveDestination()) {} + + // Define a single argument. The default type is Unit. + UntypedArgumentBuilder Define(const char* name) { + return Define({name}); + } + + // Define a single argument with multiple aliases. 
+ UntypedArgumentBuilder Define(std::initializer_list names) { + auto&& b = UntypedArgumentBuilder(*this); + b.SetNames(names); + return std::move(b); + } + + // Whether the parser should give up on unrecognized arguments. Not recommended. + Builder& IgnoreUnrecognized(bool ignore_unrecognized) { + ignore_unrecognized_ = ignore_unrecognized; + return *this; + } + + // Provide a list of arguments to ignore for backwards compatibility. + Builder& Ignore(std::initializer_list ignore_list) { + for (auto&& ignore_name : ignore_list) { + std::string ign = ignore_name; + + // Ignored arguments are just like a regular definition which have very + // liberal parsing requirements (no range checks, no value checks). + // Unlike regular argument definitions, when a value gets parsed into its + // stronger type, we just throw it away. + + if (ign.find('_') != std::string::npos) { // Does the arg-def have a wildcard? + // pretend this is a string, e.g. -Xjitconfig: + auto&& builder = Define(ignore_name).template WithType().IntoIgnore(); + assert(&builder == this); + (void)builder; // Ignore pointless unused warning, it's used in the assert. + } else { + // pretend this is a unit, e.g. -Xjitblocking + auto&& builder = Define(ignore_name).template WithType().IntoIgnore(); + assert(&builder == this); + (void)builder; // Ignore pointless unused warning, it's used in the assert. + } + } + ignore_list_ = ignore_list; + return *this; + } + + // Finish building the parser; performs sanity checks. Return value is moved, not copied. + // Do not call this more than once. 
+ CmdlineParser Build() { + assert(!built_); + built_ = true; + + auto&& p = CmdlineParser(ignore_unrecognized_, + std::move(ignore_list_), + save_destination_, + std::move(completed_arguments_)); + + return std::move(p); + } + + protected: + void AppendCompletedArgument(detail::CmdlineParseArgumentAny* arg) { + auto smart_ptr = std::unique_ptr(arg); + completed_arguments_.push_back(std::move(smart_ptr)); + } + + private: + // No copying now! + Builder(const Builder& other) = delete; + + template + friend struct ArgumentBuilder; + friend struct UntypedArgumentBuilder; + friend struct CmdlineParser; + + bool built_ = false; + bool ignore_unrecognized_ = false; + std::vector ignore_list_; + std::shared_ptr save_destination_; + + std::vector> completed_arguments_; + }; + + CmdlineResult Parse(const std::string& argv) { + std::vector tokenized; + Split(argv, ' ', &tokenized); + + return Parse(TokenRange(std::move(tokenized))); + } + + // Parse the arguments; storing results into the arguments map. Returns success value. + CmdlineResult Parse(const char* argv) { + return Parse(std::string(argv)); + } + + // Parse the arguments; storing the results into the arguments map. Returns success value. + // Assumes that argv[0] is a valid argument (i.e. not the program name). + CmdlineResult Parse(const std::vector& argv) { + return Parse(TokenRange(argv.begin(), argv.end())); + } + + // Parse the arguments; storing the results into the arguments map. Returns success value. + // Assumes that argv[0] is a valid argument (i.e. not the program name). + CmdlineResult Parse(const std::vector& argv) { + return Parse(TokenRange(argv.begin(), argv.end())); + } + + // Parse the arguments (directly from an int main(argv,argc)). Returns success value. + // Assumes that argv[0] is the program name, and ignores it. 
+ CmdlineResult Parse(const char* argv[], int argc) { + return Parse(TokenRange(&argv[1], argc - 1)); // ignore argv[0] because it's the program name + } + + // Look up the arguments that have been parsed; use the target keys to lookup individual args. + const TVariantMap& GetArgumentsMap() const { + return save_destination_->GetMap(); + } + + // Release the arguments map that has been parsed; useful for move semantics. + TVariantMap&& ReleaseArgumentsMap() { + return save_destination_->ReleaseMap(); + } + + // How many arguments were defined? + size_t CountDefinedArguments() const { + return completed_arguments_.size(); + } + + // Ensure we have a default move constructor. + CmdlineParser(CmdlineParser&&) = default; + // Ensure we have a default move assignment operator. + CmdlineParser& operator=(CmdlineParser&&) = default; + + private: + friend struct Builder; + + // Construct a new parser from the builder. Move all the arguments. + CmdlineParser(bool ignore_unrecognized, + std::vector&& ignore_list, + std::shared_ptr save_destination, + std::vector>&& completed_arguments) + : ignore_unrecognized_(ignore_unrecognized), + ignore_list_(std::move(ignore_list)), + save_destination_(save_destination), + completed_arguments_(std::move(completed_arguments)) { + assert(save_destination != nullptr); + } + + // Parse the arguments; storing results into the arguments map. Returns success value. + // The parsing will fail on the first non-success parse result and return that error. + // + // All previously-parsed arguments are cleared out. + // Otherwise, all parsed arguments will be stored into SaveDestination as a side-effect. + // A partial parse will result only in a partial save of the arguments. + CmdlineResult Parse(TokenRange&& arguments_list) { + save_destination_->Clear(); + + for (size_t i = 0; i < arguments_list.Size(); ) { + TokenRange possible_name = arguments_list.Slice(i); + + size_t best_match_size = 0; // How many tokens were matched in the best case. 
+ size_t best_match_arg_idx = 0; + bool matched = false; // At least one argument definition has been matched? + + // Find the closest argument definition for the remaining token range. + size_t arg_idx = 0; + for (auto&& arg : completed_arguments_) { + size_t local_match = arg->MaybeMatches(possible_name); + + if (local_match > best_match_size) { + best_match_size = local_match; + best_match_arg_idx = arg_idx; + matched = true; + } + arg_idx++; + } + + // Saw some kind of unknown argument + if (matched == false) { + if (UNLIKELY(ignore_unrecognized_)) { // This is usually off, we only need it for JNI. + // Consume 1 token and keep going, hopefully the next token is a good one. + ++i; + continue; + } + // Common case: + // Bail out on the first unknown argument with an error. + return CmdlineResult(CmdlineResult::kUnknown, + std::string("Unknown argument: ") + possible_name[0]); + } + + // Look at the best-matched argument definition and try to parse against that. + auto&& arg = completed_arguments_[best_match_arg_idx]; + + assert(arg->MaybeMatches(possible_name) == best_match_size); + + // Try to parse the argument now, if we have enough tokens. + std::pair num_tokens = arg->GetNumTokens(); + size_t min_tokens; + size_t max_tokens; + + std::tie(min_tokens, max_tokens) = num_tokens; + + if ((i + min_tokens) > arguments_list.Size()) { + // expected longer command line but it was too short + // e.g. 
if the argv was only "-Xms" without specifying a memory option + CMDLINE_DEBUG_LOG << "Parse failure, i = " << i << ", arg list " << arguments_list.Size() << + " num tokens in arg_def: " << min_tokens << "," << max_tokens << std::endl; + return CmdlineResult(CmdlineResult::kFailure, + std::string("Argument ") + + possible_name[0] + ": incomplete command line arguments, expected " + + std::to_string(size_t(i + min_tokens) - arguments_list.Size()) + + " more tokens"); + } + + if (best_match_size > max_tokens || best_match_size < min_tokens) { + // Even our best match was out of range, so parsing would fail instantly. + return CmdlineResult(CmdlineResult::kFailure, + std::string("Argument ") + possible_name[0] + ": too few tokens " + "matched " + std::to_string(best_match_size) + + " but wanted " + std::to_string(num_tokens.first)); + } + + // We have enough tokens to begin exact parsing. + TokenRange exact_range = possible_name.Slice(0, max_tokens); + + size_t consumed_tokens = 1; // At least 1 if we ever want to try to resume parsing on error + CmdlineResult parse_attempt = arg->ParseArgument(exact_range, &consumed_tokens); + + if (parse_attempt.IsError()) { + // We may also want to continue parsing the other tokens to gather more errors. + return parse_attempt; + } // else the value has been successfully stored into the map + + assert(consumed_tokens > 0); // Don't hang in an infinite loop trying to parse + i += consumed_tokens; + + // TODO: also handle ignoring arguments for backwards compatibility + } // for + + return CmdlineResult(CmdlineResult::kSuccess); + } + + bool ignore_unrecognized_ = false; + std::vector ignore_list_; + std::shared_ptr save_destination_; + std::vector> completed_arguments_; +}; + +// This has to be defined after everything else, since we want the builders to call this. 
+template class TVariantMapKey> +template +typename CmdlineParser::template ArgumentBuilder +CmdlineParser::CreateArgumentBuilder( + CmdlineParser::Builder& parent) { + return CmdlineParser::ArgumentBuilder( + parent, parent.save_destination_); +} + +// This has to be defined after everything else, since we want the builders to call this. +template class TVariantMapKey> +void CmdlineParser::AppendCompletedArgument( + CmdlineParser::Builder& builder, + detail::CmdlineParseArgumentAny* arg) { + builder.AppendCompletedArgument(arg); +} + +} // namespace art + +#endif // ART_CMDLINE_CMDLINE_PARSER_H_ diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc new file mode 100644 index 0000000..37dcd16 --- /dev/null +++ b/cmdline/cmdline_parser_test.cc @@ -0,0 +1,593 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cmdline_parser.h" + +#include + +#include "gtest/gtest.h" + +#include "base/utils.h" +#include "jdwp_provider.h" +#include "experimental_flags.h" +#include "parsed_options.h" +#include "runtime.h" +#include "runtime_options.h" + +#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast(expected), \ + reinterpret_cast(nullptr)); + +namespace art { + bool UsuallyEquals(double expected, double actual); + + // This has a gtest dependency, which is why it's in the gtest only. 
+ bool operator==(const ProfileSaverOptions& lhs, const ProfileSaverOptions& rhs) { + return lhs.enabled_ == rhs.enabled_ && + lhs.min_save_period_ms_ == rhs.min_save_period_ms_ && + lhs.save_resolved_classes_delay_ms_ == rhs.save_resolved_classes_delay_ms_ && + lhs.hot_startup_method_samples_ == rhs.hot_startup_method_samples_ && + lhs.min_methods_to_save_ == rhs.min_methods_to_save_ && + lhs.min_classes_to_save_ == rhs.min_classes_to_save_ && + lhs.min_notification_before_wake_ == rhs.min_notification_before_wake_ && + lhs.max_notification_before_wake_ == rhs.max_notification_before_wake_; + } + + bool UsuallyEquals(double expected, double actual) { + using FloatingPoint = ::testing::internal::FloatingPoint; + + FloatingPoint exp(expected); + FloatingPoint act(actual); + + // Compare with ULPs instead of comparing with == + return exp.AlmostEquals(act); + } + + template + bool UsuallyEquals(const T& expected, const T& actual, + typename std::enable_if< + detail::SupportsEqualityOperator::value>::type* = nullptr) { + return expected == actual; + } + + template + bool UsuallyEquals(const std::vector& expected, + const ParseStringList& actual) { + return expected == static_cast>(actual); + } + + // Try to use memcmp to compare simple plain-old-data structs. + // + // This should *not* generate false positives, but it can generate false negatives. + // This will mostly work except for fields like float which can have different bit patterns + // that are nevertheless equal. + // If a test is failing because the structs aren't "equal" when they really are + // then it's recommended to implement operator== for it instead. + template + bool UsuallyEquals(const T& expected, const T& actual, + const Ignore& ... 
more ATTRIBUTE_UNUSED, + typename std::enable_if::value>::type* = nullptr, + typename std::enable_if::value>::type* = nullptr + ) { + return memcmp(std::addressof(expected), std::addressof(actual), sizeof(T)) == 0; + } + + bool UsuallyEquals(const XGcOption& expected, const XGcOption& actual) { + return memcmp(std::addressof(expected), std::addressof(actual), sizeof(expected)) == 0; + } + + bool UsuallyEquals(const char* expected, const std::string& actual) { + return std::string(expected) == actual; + } + + template + ::testing::AssertionResult IsExpectedKeyValue(const T& expected, + const TMap& map, + const TKey& key) { + auto* actual = map.Get(key); + if (actual != nullptr) { + if (!UsuallyEquals(expected, *actual)) { + return ::testing::AssertionFailure() + << "expected " << detail::ToStringAny(expected) << " but got " + << detail::ToStringAny(*actual); + } + return ::testing::AssertionSuccess(); + } + + return ::testing::AssertionFailure() << "key was not in the map"; + } + + template + ::testing::AssertionResult IsExpectedDefaultKeyValue(const T& expected, + const TMap& map, + const TKey& key) { + const T& actual = map.GetOrDefault(key); + if (!UsuallyEquals(expected, actual)) { + return ::testing::AssertionFailure() + << "expected " << detail::ToStringAny(expected) << " but got " + << detail::ToStringAny(actual); + } + return ::testing::AssertionSuccess(); + } + +class CmdlineParserTest : public ::testing::Test { + public: + CmdlineParserTest() = default; + ~CmdlineParserTest() = default; + + protected: + using M = RuntimeArgumentMap; + using RuntimeParser = ParsedOptions::RuntimeParser; + + static void SetUpTestCase() { + art::Locks::Init(); + art::InitLogging(nullptr, art::Runtime::Abort); // argv = null + } + + void SetUp() override { + parser_ = ParsedOptions::MakeParser(false); // do not ignore unrecognized options + } + + static ::testing::AssertionResult IsResultSuccessful(const CmdlineResult& result) { + if (result.IsSuccess()) { + return 
::testing::AssertionSuccess(); + } else { + return ::testing::AssertionFailure() + << result.GetStatus() << " with: " << result.GetMessage(); + } + } + + static ::testing::AssertionResult IsResultFailure(const CmdlineResult& result, + CmdlineResult::Status failure_status) { + if (result.IsSuccess()) { + return ::testing::AssertionFailure() << " got success but expected failure: " + << failure_status; + } else if (result.GetStatus() == failure_status) { + return ::testing::AssertionSuccess(); + } + + return ::testing::AssertionFailure() << " expected failure " << failure_status + << " but got " << result.GetStatus(); + } + + std::unique_ptr parser_; +}; + +#define EXPECT_KEY_EXISTS(map, key) EXPECT_TRUE((map).Exists(key)) +#define EXPECT_KEY_VALUE(map, key, expected) EXPECT_TRUE(IsExpectedKeyValue(expected, map, key)) +#define EXPECT_DEFAULT_KEY_VALUE(map, key, expected) EXPECT_TRUE(IsExpectedDefaultKeyValue(expected, map, key)) + +#define _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv) \ + do { \ + EXPECT_TRUE(IsResultSuccessful(parser_->Parse(argv))); \ + EXPECT_EQ(0u, parser_->GetArgumentsMap().Size()); \ + +#define EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv) \ + _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); \ + } while (false) + +#define EXPECT_SINGLE_PARSE_DEFAULT_VALUE(expected, argv, key)\ + _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); \ + RuntimeArgumentMap args = parser_->ReleaseArgumentsMap(); \ + EXPECT_DEFAULT_KEY_VALUE(args, key, expected); \ + } while (false) // NOLINT [readability/namespace] [5] + +#define _EXPECT_SINGLE_PARSE_EXISTS(argv, key) \ + do { \ + EXPECT_TRUE(IsResultSuccessful(parser_->Parse(argv))); \ + RuntimeArgumentMap args = parser_->ReleaseArgumentsMap(); \ + EXPECT_EQ(1u, args.Size()); \ + EXPECT_KEY_EXISTS(args, key); \ + +#define EXPECT_SINGLE_PARSE_EXISTS(argv, key) \ + _EXPECT_SINGLE_PARSE_EXISTS(argv, key); \ + } while (false) + +#define EXPECT_SINGLE_PARSE_VALUE(expected, argv, key) \ + _EXPECT_SINGLE_PARSE_EXISTS(argv, key); \ + 
EXPECT_KEY_VALUE(args, key, expected); \ + } while (false) + +#define EXPECT_SINGLE_PARSE_VALUE_STR(expected, argv, key) \ + EXPECT_SINGLE_PARSE_VALUE(std::string(expected), argv, key) + +#define EXPECT_SINGLE_PARSE_FAIL(argv, failure_status) \ + do { \ + EXPECT_TRUE(IsResultFailure(parser_->Parse(argv), failure_status));\ + RuntimeArgumentMap args = parser_->ReleaseArgumentsMap();\ + EXPECT_EQ(0u, args.Size()); \ + } while (false) + +TEST_F(CmdlineParserTest, TestSimpleSuccesses) { + auto& parser = *parser_; + + EXPECT_LT(0u, parser.CountDefinedArguments()); + + { + // Test case 1: No command line arguments + EXPECT_TRUE(IsResultSuccessful(parser.Parse(""))); + RuntimeArgumentMap args = parser.ReleaseArgumentsMap(); + EXPECT_EQ(0u, args.Size()); + } + + EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote); + EXPECT_SINGLE_PARSE_VALUE(std::vector({"/hello/world"}), + "-Xbootclasspath:/hello/world", + M::BootClassPath); + EXPECT_SINGLE_PARSE_VALUE(std::vector({"/hello", "/world"}), + "-Xbootclasspath:/hello:/world", + M::BootClassPath); + EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-classpath /hello/world", M::ClassPath); + EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize); + EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize); + EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM); + EXPECT_SINGLE_PARSE_VALUE(false, "-XX:DisableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM); + EXPECT_SINGLE_PARSE_VALUE(0.5, "-XX:HeapTargetUtilization=0.5", M::HeapTargetUtilization); + EXPECT_SINGLE_PARSE_VALUE(5u, "-XX:ParallelGCThreads=5", M::ParallelGCThreads); + EXPECT_SINGLE_PARSE_EXISTS("-Xno-dex-file-fallback", M::NoDexFileFallback); +} // TEST_F + +TEST_F(CmdlineParserTest, TestSimpleFailures) { + // Test argument is unknown to the parser + EXPECT_SINGLE_PARSE_FAIL("abcdefg^%@#*(@#", CmdlineResult::kUnknown); + // Test value map substitution fails + 
EXPECT_SINGLE_PARSE_FAIL("-Xverify:whatever", CmdlineResult::kFailure); + // Test value type parsing failures + EXPECT_SINGLE_PARSE_FAIL("-Xsswhatever", CmdlineResult::kFailure); // invalid memory value + EXPECT_SINGLE_PARSE_FAIL("-Xms123", CmdlineResult::kFailure); // memory value too small + EXPECT_SINGLE_PARSE_FAIL("-XX:HeapTargetUtilization=0.0", CmdlineResult::kOutOfRange); // toosmal + EXPECT_SINGLE_PARSE_FAIL("-XX:HeapTargetUtilization=2.0", CmdlineResult::kOutOfRange); // toolarg + EXPECT_SINGLE_PARSE_FAIL("-XX:ParallelGCThreads=-5", CmdlineResult::kOutOfRange); // too small + EXPECT_SINGLE_PARSE_FAIL("-Xgc:blablabla", CmdlineResult::kUsage); // not a valid suboption +} // TEST_F + +TEST_F(CmdlineParserTest, TestLogVerbosity) { + { + const char* log_args = "-verbose:" + "class,compiler,gc,heap,interpreter,jdwp,jni,monitor,profiler,signals,simulator,startup," + "third-party-jni,threads,verifier,verifier-debug"; + + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.class_linker = true; + log_verbosity.compiler = true; + log_verbosity.gc = true; + log_verbosity.heap = true; + log_verbosity.interpreter = true; + log_verbosity.jdwp = true; + log_verbosity.jni = true; + log_verbosity.monitor = true; + log_verbosity.profiler = true; + log_verbosity.signals = true; + log_verbosity.simulator = true; + log_verbosity.startup = true; + log_verbosity.third_party_jni = true; + log_verbosity.threads = true; + log_verbosity.verifier = true; + log_verbosity.verifier_debug = true; + + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + { + const char* log_args = "-verbose:" + "class,compiler,gc,heap,jdwp,jni,monitor"; + + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.class_linker = true; + log_verbosity.compiler = true; + log_verbosity.gc = true; + log_verbosity.heap = true; + log_verbosity.jdwp = true; + log_verbosity.jni = true; + log_verbosity.monitor = true; + + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); 
+ } + + EXPECT_SINGLE_PARSE_FAIL("-verbose:blablabla", CmdlineResult::kUsage); // invalid verbose opt + + { + const char* log_args = "-verbose:deopt"; + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.deopt = true; + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + { + const char* log_args = "-verbose:collector"; + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.collector = true; + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + { + const char* log_args = "-verbose:oat"; + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.oat = true; + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + { + const char* log_args = "-verbose:dex"; + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.dex = true; + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } +} // TEST_F + +// TODO: Enable this b/19274810 +TEST_F(CmdlineParserTest, DISABLED_TestXGcOption) { + /* + * Test success + */ + { + XGcOption option_all_true{}; + option_all_true.collector_type_ = gc::CollectorType::kCollectorTypeCMS; + option_all_true.verify_pre_gc_heap_ = true; + option_all_true.verify_pre_sweeping_heap_ = true; + option_all_true.verify_post_gc_heap_ = true; + option_all_true.verify_pre_gc_rosalloc_ = true; + option_all_true.verify_pre_sweeping_rosalloc_ = true; + option_all_true.verify_post_gc_rosalloc_ = true; + + const char * xgc_args_all_true = "-Xgc:concurrent," + "preverify,presweepingverify,postverify," + "preverify_rosalloc,presweepingverify_rosalloc," + "postverify_rosalloc,precise," + "verifycardtable"; + + EXPECT_SINGLE_PARSE_VALUE(option_all_true, xgc_args_all_true, M::GcOption); + + XGcOption option_all_false{}; + option_all_false.collector_type_ = gc::CollectorType::kCollectorTypeMS; + option_all_false.verify_pre_gc_heap_ = false; + option_all_false.verify_pre_sweeping_heap_ = false; + option_all_false.verify_post_gc_heap_ = false; + 
option_all_false.verify_pre_gc_rosalloc_ = false; + option_all_false.verify_pre_sweeping_rosalloc_ = false; + option_all_false.verify_post_gc_rosalloc_ = false; + + const char* xgc_args_all_false = "-Xgc:nonconcurrent," + "nopreverify,nopresweepingverify,nopostverify,nopreverify_rosalloc," + "nopresweepingverify_rosalloc,nopostverify_rosalloc,noprecise,noverifycardtable"; + + EXPECT_SINGLE_PARSE_VALUE(option_all_false, xgc_args_all_false, M::GcOption); + + XGcOption option_all_default{}; + + const char* xgc_args_blank = "-Xgc:"; + EXPECT_SINGLE_PARSE_VALUE(option_all_default, xgc_args_blank, M::GcOption); + } + + /* + * Test failures + */ + EXPECT_SINGLE_PARSE_FAIL("-Xgc:blablabla", CmdlineResult::kUsage); // invalid Xgc opt +} // TEST_F + +/* + * { "-XjdwpProvider:_" } + */ +TEST_F(CmdlineParserTest, TestJdwpProviderEmpty) { + { + EXPECT_SINGLE_PARSE_DEFAULT_VALUE(JdwpProvider::kUnset, "", M::JdwpProvider); + } +} // TEST_F + +TEST_F(CmdlineParserTest, TestJdwpProviderDefault) { + const char* opt_args = "-XjdwpProvider:default"; + EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kDefaultJdwpProvider, opt_args, M::JdwpProvider); +} // TEST_F + +TEST_F(CmdlineParserTest, TestJdwpProviderNone) { + const char* opt_args = "-XjdwpProvider:none"; + EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kNone, opt_args, M::JdwpProvider); +} // TEST_F + +TEST_F(CmdlineParserTest, TestJdwpProviderAdbconnection) { + const char* opt_args = "-XjdwpProvider:adbconnection"; + EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kAdbConnection, opt_args, M::JdwpProvider); +} // TEST_F + +TEST_F(CmdlineParserTest, TestJdwpProviderHelp) { + EXPECT_SINGLE_PARSE_FAIL("-XjdwpProvider:help", CmdlineResult::kUsage); +} // TEST_F + +TEST_F(CmdlineParserTest, TestJdwpProviderFail) { + EXPECT_SINGLE_PARSE_FAIL("-XjdwpProvider:blablabla", CmdlineResult::kFailure); +} // TEST_F + +/* + * -D_ -D_ -D_ ... 
+ */ +TEST_F(CmdlineParserTest, TestPropertiesList) { + /* + * Test successes + */ + { + std::vector opt = {"hello"}; + + EXPECT_SINGLE_PARSE_VALUE(opt, "-Dhello", M::PropertiesList); + } + + { + std::vector opt = {"hello", "world"}; + + EXPECT_SINGLE_PARSE_VALUE(opt, "-Dhello -Dworld", M::PropertiesList); + } + + { + std::vector opt = {"one", "two", "three"}; + + EXPECT_SINGLE_PARSE_VALUE(opt, "-Done -Dtwo -Dthree", M::PropertiesList); + } +} // TEST_F + +/* +* -Xcompiler-option foo -Xcompiler-option bar ... +*/ +TEST_F(CmdlineParserTest, TestCompilerOption) { + /* + * Test successes + */ + { + std::vector opt = {"hello"}; + EXPECT_SINGLE_PARSE_VALUE(opt, "-Xcompiler-option hello", M::CompilerOptions); + } + + { + std::vector opt = {"hello", "world"}; + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xcompiler-option hello -Xcompiler-option world", + M::CompilerOptions); + } + + { + std::vector opt = {"one", "two", "three"}; + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xcompiler-option one -Xcompiler-option two -Xcompiler-option three", + M::CompilerOptions); + } +} // TEST_F + +/* +* -Xjit, -Xnojit, -Xjitcodecachesize, Xjitcompilethreshold +*/ +TEST_F(CmdlineParserTest, TestJitOptions) { + /* + * Test successes + */ + { + EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJitCompilation); + EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJitCompilation); + } + { + EXPECT_SINGLE_PARSE_VALUE( + MemoryKiB(16 * KB), "-Xjitinitialsize:16K", M::JITCodeCacheInitialCapacity); + EXPECT_SINGLE_PARSE_VALUE( + MemoryKiB(16 * MB), "-Xjitmaxsize:16M", M::JITCodeCacheMaxCapacity); + } + { + EXPECT_SINGLE_PARSE_VALUE(12345u, "-Xjitthreshold:12345", M::JITCompileThreshold); + } +} // TEST_F + +/* +* -Xps-* +*/ +TEST_F(CmdlineParserTest, ProfileSaverOptions) { + ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc", true); + + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xjitsaveprofilinginfo " + "-Xps-min-save-period-ms:1 " + "-Xps-save-resolved-classes-delay-ms:2 " + 
"-Xps-hot-startup-method-samples:3 " + "-Xps-min-methods-to-save:4 " + "-Xps-min-classes-to-save:5 " + "-Xps-min-notification-before-wake:6 " + "-Xps-max-notification-before-wake:7 " + "-Xps-profile-path:abc " + "-Xps-profile-boot-class-path", + M::ProfileSaverOpts); +} // TEST_F + +/* -Xexperimental:_ */ +TEST_F(CmdlineParserTest, TestExperimentalFlags) { + // Default + EXPECT_SINGLE_PARSE_DEFAULT_VALUE(ExperimentalFlags::kNone, + "", + M::Experimental); + + // Disabled explicitly + EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kNone, + "-Xexperimental:none", + M::Experimental); +} + +// -Xverify:_ +TEST_F(CmdlineParserTest, TestVerify) { + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kNone, "-Xverify:none", M::Verify); + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:remote", M::Verify); + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:all", M::Verify); + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kSoftFail, "-Xverify:softfail", M::Verify); +} + +TEST_F(CmdlineParserTest, TestIgnoreUnrecognized) { + RuntimeParser::Builder parserBuilder; + + parserBuilder + .Define("-help") + .IntoKey(M::Help) + .IgnoreUnrecognized(true); + + parser_.reset(new RuntimeParser(parserBuilder.Build())); + + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS("-non-existent-option"); + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS("-non-existent-option1 --non-existent-option-2"); +} // TEST_F + +TEST_F(CmdlineParserTest, TestIgnoredArguments) { + std::initializer_list ignored_args = { + "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa", + "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:abdef", + "-Xdexopt:foobar", "-Xnoquithandler", "-Xjnigreflimit:ixnay", "-Xgenregmap", "-Xnogenregmap", + "-Xverifyopt:never", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:noop", + "-Xincludeselectedmethod", "-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:nosuchluck", + "-Xjitoffset:none", "-Xjitconfig:yes", "-Xjitcheckcg", 
"-Xjitverbose", "-Xjitprofile", + "-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=1337" + }; + + // Check they are ignored when parsed one at a time + for (auto&& arg : ignored_args) { + SCOPED_TRACE(arg); + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(arg); + } + + // Check they are ignored when we pass it all together at once + std::vector argv = ignored_args; + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); +} // TEST_F + +TEST_F(CmdlineParserTest, MultipleArguments) { + EXPECT_TRUE(IsResultSuccessful(parser_->Parse( + "-help -XX:ForegroundHeapGrowthMultiplier=0.5 " + "-Xmethod-trace -XX:LargeObjectSpace=map"))); + + auto&& map = parser_->ReleaseArgumentsMap(); + EXPECT_EQ(4u, map.Size()); + EXPECT_KEY_VALUE(map, M::Help, Unit{}); + EXPECT_KEY_VALUE(map, M::ForegroundHeapGrowthMultiplier, 0.5); + EXPECT_KEY_VALUE(map, M::MethodTrace, Unit{}); + EXPECT_KEY_VALUE(map, M::LargeObjectSpace, gc::space::LargeObjectSpaceType::kMap); +} // TEST_F + +TEST_F(CmdlineParserTest, TypesNotInRuntime) { + CmdlineType> ct; + auto success0 = + CmdlineParseResult>::Success(std::vector({1, 2, 3, 4})); + EXPECT_EQ(success0, ct.Parse("1,2,3,4")); + auto success1 = CmdlineParseResult>::Success(std::vector({0})); + EXPECT_EQ(success1, ct.Parse("1")); + + EXPECT_FALSE(ct.Parse("").IsSuccess()); + EXPECT_FALSE(ct.Parse(",").IsSuccess()); + EXPECT_FALSE(ct.Parse("1,").IsSuccess()); + EXPECT_FALSE(ct.Parse(",1").IsSuccess()); + EXPECT_FALSE(ct.Parse("1a2").IsSuccess()); + EXPECT_EQ(CmdlineResult::kOutOfRange, ct.Parse("1,10000000000000").GetStatus()); + EXPECT_EQ(CmdlineResult::kOutOfRange, ct.Parse("-10000000000000,123").GetStatus()); +} // TEST_F +} // namespace art diff --git a/cmdline/cmdline_result.h b/cmdline/cmdline_result.h new file mode 100644 index 0000000..0ae1145 --- /dev/null +++ b/cmdline/cmdline_result.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_CMDLINE_RESULT_H_ +#define ART_CMDLINE_CMDLINE_RESULT_H_ + +#include +#include "base/utils.h" + +namespace art { +// Result of an attempt to process the command line arguments. If fails, specifies +// the specific error code and an error message. +// Use the value-carrying CmdlineParseResult to get an additional value out in a success case. +struct CmdlineResult { + enum Status { + kSuccess, + // Error codes: + kUsage, + kFailure, + kOutOfRange, + kUnknown, + }; + + // Short-hand for checking if the result was successful. + operator bool() const { + return IsSuccess(); + } + + // Check if the operation has succeeded. + bool IsSuccess() const { return status_ == kSuccess; } + // Check if the operation was not a success. + bool IsError() const { return status_ != kSuccess; } + // Get the specific status, regardless of whether it's failure or success. + Status GetStatus() const { return status_; } + + // Get the error message, *must* only be called for error status results. + const std::string& GetMessage() const { assert(IsError()); return message_; } + + // Constructor any status. No message. + explicit CmdlineResult(Status status) : status_(status) {} + + // Constructor with an error status, copying the message. + CmdlineResult(Status status, const std::string& message) + : status_(status), message_(message) { + assert(status != kSuccess); + } + + // Constructor with an error status, taking over the message. 
+ CmdlineResult(Status status, std::string&& message) + : status_(status), message_(message) { + assert(status != kSuccess); + } + + // Make sure copying exists + CmdlineResult(const CmdlineResult&) = default; + // Make sure moving is cheap + CmdlineResult(CmdlineResult&&) = default; + + private: + const Status status_; + const std::string message_; +}; + +// TODO: code-generate this +static inline std::ostream& operator<<(std::ostream& stream, CmdlineResult::Status status) { + switch (status) { + case CmdlineResult::kSuccess: + stream << "kSuccess"; + break; + case CmdlineResult::kUsage: + stream << "kUsage"; + break; + case CmdlineResult::kFailure: + stream << "kFailure"; + break; + case CmdlineResult::kOutOfRange: + stream << "kOutOfRange"; + break; + case CmdlineResult::kUnknown: + stream << "kUnknown"; + break; + default: + UNREACHABLE(); + } + return stream; +} +} // namespace art + +#endif // ART_CMDLINE_CMDLINE_RESULT_H_ diff --git a/cmdline/cmdline_type_parser.h b/cmdline/cmdline_type_parser.h new file mode 100644 index 0000000..fa5cdaf --- /dev/null +++ b/cmdline/cmdline_type_parser.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_CMDLINE_TYPE_PARSER_H_ +#define ART_CMDLINE_CMDLINE_TYPE_PARSER_H_ + +#include "cmdline_parse_result.h" + +namespace art { + +// Base class for user-defined CmdlineType specializations. 
+// +// Not strictly necessary, but if the specializations fail to Define all of these functions +// the compilation will fail. +template +struct CmdlineTypeParser { + // Return value of parsing attempts. Represents a Success(T value) or an Error(int code) + using Result = CmdlineParseResult; + + // Parse a single value for an argument definition out of the wildcard component. + // + // e.g. if the argument definition was "foo:_", and the user-provided input was "foo:bar", + // then args is "bar". + Result Parse(const std::string& args ATTRIBUTE_UNUSED) { + assert(false); + return Result::Failure("Missing type specialization and/or value map"); + } + + // Parse a value and append it into the existing value so far, for argument + // definitions which are marked with AppendValues(). + // + // The value is parsed out of the wildcard component as in Parse. + // + // If the initial value does not exist yet, a default value is created by + // value-initializing with 'T()'. + Result ParseAndAppend(const std::string& args ATTRIBUTE_UNUSED, + T& existing_value ATTRIBUTE_UNUSED) { + assert(false); + return Result::Failure("Missing type specialization and/or value map"); + } + + // Runtime type name of T, so that we can print more useful error messages. + static const char* Name() { assert(false); return "UnspecializedType"; } + + // Whether or not your type can parse argument definitions defined without a "_" + // e.g. -Xenable-profiler just mutates the existing profiler struct in-place + // so it doesn't need to do any parsing other than token recognition. + // + // If this is false, then either the argument definition has a _, from which the parsing + // happens, or the tokens get mapped to a value list/map from which a 1:1 matching occurs. + // + // This should almost *always* be false! + static constexpr bool kCanParseBlankless = false; + + protected: + // Don't accidentally initialize instances of this directly; they will assert at runtime. 
+ CmdlineTypeParser() = default; +}; + + +} // namespace art + +#endif // ART_CMDLINE_CMDLINE_TYPE_PARSER_H_ diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h new file mode 100644 index 0000000..25902f1 --- /dev/null +++ b/cmdline/cmdline_types.h @@ -0,0 +1,774 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ART_CMDLINE_CMDLINE_TYPES_H_ +#define ART_CMDLINE_CMDLINE_TYPES_H_ + +#define CMDLINE_NDEBUG 1 // Do not output any debugging information for parsing. + +#include + +#include "cmdline_type_parser.h" +#include "detail/cmdline_debug_detail.h" +#include "memory_representation.h" + +#include "android-base/logging.h" +#include "android-base/strings.h" + +// Includes for the types that are being specialized +#include +#include "base/time_utils.h" +#include "base/logging.h" +#include "experimental_flags.h" +#include "gc/collector_type.h" +#include "gc/space/large_object_space.h" +#include "jdwp_provider.h" +#include "jit/profile_saver_options.h" +#include "plugin.h" +#include "read_barrier_config.h" +#include "ti/agent.h" +#include "unit.h" + +namespace art { + +// The default specialization will always fail parsing the type from a string. +// Provide your own specialization that inherits from CmdlineTypeParser +// and implements either Parse or ParseAndAppend +// (only if the argument was defined with ::AppendValues()) but not both. 
+template +struct CmdlineType : CmdlineTypeParser { +}; + +// Specializations for CmdlineType follow: + +// Parse argument definitions for Unit-typed arguments. +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& args) { + if (args == "") { + return Result::Success(Unit{}); + } + return Result::Failure("Unexpected extra characters " + args); + } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + /* + * Handle a single JDWP provider name. Must be either 'internal', 'default', or the file name of + * an agent. A plugin will make use of this and the jdwpOptions to set up jdwp when appropriate. + */ + Result Parse(const std::string& option) { + if (option == "help") { + return Result::Usage( + "Example: -XjdwpProvider:none to disable JDWP\n" + "Example: -XjdwpProvider:adbconnection for adb connection mediated jdwp implementation\n" + "Example: -XjdwpProvider:default for the default jdwp implementation\n"); + } else if (option == "default") { + return Result::Success(JdwpProvider::kDefaultJdwpProvider); + } else if (option == "adbconnection") { + return Result::Success(JdwpProvider::kAdbConnection); + } else if (option == "none") { + return Result::Success(JdwpProvider::kNone); + } else { + return Result::Failure(std::string("not a valid jdwp provider: ") + option); + } + } + static const char* Name() { return "JdwpProvider"; } +}; + +template +struct CmdlineType> : CmdlineTypeParser> { + using typename CmdlineTypeParser>::Result; + + Result Parse(const std::string& arg) { + CMDLINE_DEBUG_LOG << "Parsing memory: " << arg << std::endl; + size_t val = ParseMemoryOption(arg.c_str(), Divisor); + CMDLINE_DEBUG_LOG << "Memory parsed to size_t value: " << val << std::endl; + + if (val == 0) { + return Result::Failure(std::string("not a valid memory value, or not divisible by ") + + std::to_string(Divisor)); + } + + return Result::Success(Memory(val)); + } + + // Parse a string of the form /[0-9]+[kKmMgG]?/, which is used to 
specify + // memory sizes. [kK] indicates kilobytes, [mM] megabytes, and + // [gG] gigabytes. + // + // "s" should point just past the "-Xm?" part of the string. + // "div" specifies a divisor, e.g. 1024 if the value must be a multiple + // of 1024. + // + // The spec says the -Xmx and -Xms options must be multiples of 1024. It + // doesn't say anything about -Xss. + // + // Returns 0 (a useless size) if "s" is malformed or specifies a low or + // non-evenly-divisible value. + // + static size_t ParseMemoryOption(const char* s, size_t div) { + // strtoul accepts a leading [+-], which we don't want, + // so make sure our string starts with a decimal digit. + if (isdigit(*s)) { + char* s2; + size_t val = strtoul(s, &s2, 10); + if (s2 != s) { + // s2 should be pointing just after the number. + // If this is the end of the string, the user + // has specified a number of bytes. Otherwise, + // there should be exactly one more character + // that specifies a multiplier. + if (*s2 != '\0') { + // The remainder of the string is either a single multiplier + // character, or nothing to indicate that the value is in + // bytes. + char c = *s2++; + if (*s2 == '\0') { + size_t mul; + if (c == '\0') { + mul = 1; + } else if (c == 'k' || c == 'K') { + mul = KB; + } else if (c == 'm' || c == 'M') { + mul = MB; + } else if (c == 'g' || c == 'G') { + mul = GB; + } else { + // Unknown multiplier character. + return 0; + } + + if (val <= std::numeric_limits::max() / mul) { + val *= mul; + } else { + // Clamp to a multiple of 1024. + val = std::numeric_limits::max() & ~(1024-1); + } + } else { + // There's more than one character after the numeric part. + return 0; + } + } + // The man page says that a -Xm value must be a multiple of 1024. 
+ if (val % div == 0) { + return val; + } + } + } + return 0; + } + + static const char* Name() { return Memory::Name(); } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + char* end = nullptr; + errno = 0; + double value = strtod(str.c_str(), &end); + + if (*end != '\0') { + return Result::Failure("Failed to parse double from " + str); + } + if (errno == ERANGE) { + return Result::OutOfRange( + "Failed to parse double from " + str + "; overflow/underflow occurred"); + } + + return Result::Success(value); + } + + static const char* Name() { return "double"; } +}; + +template +static inline CmdlineParseResult ParseNumeric(const std::string& str) { + static_assert(sizeof(T) < sizeof(long long int), // NOLINT [runtime/int] [4] + "Current support is restricted."); + + const char* begin = str.c_str(); + char* end; + + // Parse into a larger type (long long) because we can't use strtoul + // since it silently converts negative values into unsigned long and doesn't set errno. + errno = 0; + long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4] + if (begin == end || *end != '\0' || errno == EINVAL) { + return CmdlineParseResult::Failure("Failed to parse integer from " + str); + } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4] + result < std::numeric_limits::min() || result > std::numeric_limits::max()) { + return CmdlineParseResult::OutOfRange( + "Failed to parse integer from " + str + "; out of range"); + } + + return CmdlineParseResult::Success(static_cast(result)); +} + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + return ParseNumeric(str); + } + + static const char* Name() { return "unsigned integer"; } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + return ParseNumeric(str); + } + + static const char* Name() { return "integer"; } +}; + +// Lightweight nanosecond value type. 
Allows parser to convert user-input from milliseconds +// to nanoseconds automatically after parsing. +// +// All implicit conversion from uint64_t uses nanoseconds. +struct MillisecondsToNanoseconds { + // Create from nanoseconds. + MillisecondsToNanoseconds(uint64_t nanoseconds) : nanoseconds_(nanoseconds) { // NOLINT [runtime/explicit] [5] + } + + // Create from milliseconds. + static MillisecondsToNanoseconds FromMilliseconds(unsigned int milliseconds) { + return MillisecondsToNanoseconds(MsToNs(milliseconds)); + } + + // Get the underlying nanoseconds value. + uint64_t GetNanoseconds() const { + return nanoseconds_; + } + + // Get the milliseconds value [via a conversion]. Loss of precision will occur. + uint64_t GetMilliseconds() const { + return NsToMs(nanoseconds_); + } + + // Get the underlying nanoseconds value. + operator uint64_t() const { + return GetNanoseconds(); + } + + // Default constructors/copy-constructors. + MillisecondsToNanoseconds() : nanoseconds_(0ul) {} + MillisecondsToNanoseconds(const MillisecondsToNanoseconds&) = default; + MillisecondsToNanoseconds(MillisecondsToNanoseconds&&) = default; + + private: + uint64_t nanoseconds_; +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + CmdlineType uint_parser; + CmdlineParseResult res = uint_parser.Parse(str); + + if (res.IsSuccess()) { + return Result::Success(MillisecondsToNanoseconds::FromMilliseconds(res.GetValue())); + } else { + return Result::CastError(res); + } + } + + static const char* Name() { return "MillisecondsToNanoseconds"; } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& args) { + return Result::Success(args); + } + + Result ParseAndAppend(const std::string& args, + std::string& existing_value) { + if (existing_value.empty()) { + existing_value = args; + } else { + existing_value += ' '; + existing_value += args; + } + return Result::SuccessNoValue(); + } +}; + +template <> 
+struct CmdlineType> : CmdlineTypeParser> { + Result Parse(const std::string& args) { + assert(false && "Use AppendValues() for a Plugin vector type"); + return Result::Failure("Unconditional failure: Plugin vector must be appended: " + args); + } + + Result ParseAndAppend(const std::string& args, + std::vector& existing_value) { + existing_value.push_back(Plugin::Create(args)); + return Result::SuccessNoValue(); + } + + static const char* Name() { return "std::vector"; } +}; + +template <> +struct CmdlineType> : CmdlineTypeParser> { + Result Parse(const std::string& args) { + assert(false && "Use AppendValues() for an Agent list type"); + return Result::Failure("Unconditional failure: Agent list must be appended: " + args); + } + + Result ParseAndAppend(const std::string& args, + std::list& existing_value) { + existing_value.emplace_back(args); + return Result::SuccessNoValue(); + } + + static const char* Name() { return "std::list"; } +}; + +template <> +struct CmdlineType> : CmdlineTypeParser> { + Result Parse(const std::string& args) { + assert(false && "Use AppendValues() for a string vector type"); + return Result::Failure("Unconditional failure: string vector must be appended: " + args); + } + + Result ParseAndAppend(const std::string& args, + std::vector& existing_value) { + existing_value.push_back(args); + return Result::SuccessNoValue(); + } + + static const char* Name() { return "std::vector"; } +}; + +template +struct ParseStringList { + explicit ParseStringList(std::vector&& list) : list_(list) {} + + operator std::vector() const { + return list_; + } + + operator std::vector&&() && { + return std::move(list_); + } + + size_t Size() const { + return list_.size(); + } + + std::string Join() const { + return android::base::Join(list_, Separator); + } + + static ParseStringList Split(const std::string& str) { + std::vector list; + art::Split(str, Separator, &list); + return ParseStringList(std::move(list)); + } + + ParseStringList() = default; + 
ParseStringList(const ParseStringList&) = default; + ParseStringList(ParseStringList&&) = default; + + private: + std::vector list_; +}; + +template +struct CmdlineType> : CmdlineTypeParser> { + using Result = CmdlineParseResult>; + + Result Parse(const std::string& args) { + return Result::Success(ParseStringList::Split(args)); + } + + static const char* Name() { return "ParseStringList"; } +}; + +template <> +struct CmdlineType> : CmdlineTypeParser> { + using Result = CmdlineParseResult>; + + Result Parse(const std::string& args) { + std::vector list; + const char* pos = args.c_str(); + errno = 0; + + while (true) { + char* end = nullptr; + int64_t value = strtol(pos, &end, 10); + if (pos == end || errno == EINVAL) { + return Result::Failure("Failed to parse integer from " + args); + } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4] + value < std::numeric_limits::min() || + value > std::numeric_limits::max()) { + return Result::OutOfRange("Failed to parse integer from " + args + "; out of range"); + } + list.push_back(static_cast(value)); + if (*end == '\0') { + break; + } else if (*end != ',') { + return Result::Failure(std::string("Unexpected character: ") + *end); + } + pos = end + 1; + } + return Result::Success(std::move(list)); + } + + static const char* Name() { return "std::vector"; } +}; + +static gc::CollectorType ParseCollectorType(const std::string& option) { + if (option == "MS" || option == "nonconcurrent") { + return gc::kCollectorTypeMS; + } else if (option == "CMS" || option == "concurrent") { + return gc::kCollectorTypeCMS; + } else if (option == "SS") { + return gc::kCollectorTypeSS; + } else if (option == "CC") { + return gc::kCollectorTypeCC; + } else { + return gc::kCollectorTypeNone; + } +} + +struct XGcOption { + // These defaults are used when the command line arguments for -Xgc: + // are either omitted completely or partially. 
+ gc::CollectorType collector_type_ = gc::kCollectorTypeDefault; + bool verify_pre_gc_heap_ = false; + bool verify_pre_sweeping_heap_ = kIsDebugBuild; + bool generational_cc = kEnableGenerationalCCByDefault; + bool verify_post_gc_heap_ = false; + bool verify_pre_gc_rosalloc_ = kIsDebugBuild; + bool verify_pre_sweeping_rosalloc_ = false; + bool verify_post_gc_rosalloc_ = false; + // Do no measurements for kUseTableLookupReadBarrier to avoid test timeouts. b/31679493 + bool measure_ = kIsDebugBuild && !kUseTableLookupReadBarrier; + bool gcstress_ = false; +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& option) { // -Xgc: already stripped + XGcOption xgc{}; + + std::vector gc_options; + Split(option, ',', &gc_options); + for (const std::string& gc_option : gc_options) { + gc::CollectorType collector_type = ParseCollectorType(gc_option); + if (collector_type != gc::kCollectorTypeNone) { + xgc.collector_type_ = collector_type; + } else if (gc_option == "preverify") { + xgc.verify_pre_gc_heap_ = true; + } else if (gc_option == "nopreverify") { + xgc.verify_pre_gc_heap_ = false; + } else if (gc_option == "presweepingverify") { + xgc.verify_pre_sweeping_heap_ = true; + } else if (gc_option == "nopresweepingverify") { + xgc.verify_pre_sweeping_heap_ = false; + } else if (gc_option == "generational_cc") { + // Note: Option "-Xgc:generational_cc" can be passed directly by + // app_process/zygote (see `android::AndroidRuntime::startVm`). If this + // option is ever deprecated, it should still be accepted (but ignored) + // for compatibility reasons (this should not prevent the runtime from + // starting up). + xgc.generational_cc = true; + } else if (gc_option == "nogenerational_cc") { + // Note: Option "-Xgc:nogenerational_cc" can be passed directly by + // app_process/zygote (see `android::AndroidRuntime::startVm`). 
If this + // option is ever deprecated, it should still be accepted (but ignored) + // for compatibility reasons (this should not prevent the runtime from + // starting up). + xgc.generational_cc = false; + } else if (gc_option == "postverify") { + xgc.verify_post_gc_heap_ = true; + } else if (gc_option == "nopostverify") { + xgc.verify_post_gc_heap_ = false; + } else if (gc_option == "preverify_rosalloc") { + xgc.verify_pre_gc_rosalloc_ = true; + } else if (gc_option == "nopreverify_rosalloc") { + xgc.verify_pre_gc_rosalloc_ = false; + } else if (gc_option == "presweepingverify_rosalloc") { + xgc.verify_pre_sweeping_rosalloc_ = true; + } else if (gc_option == "nopresweepingverify_rosalloc") { + xgc.verify_pre_sweeping_rosalloc_ = false; + } else if (gc_option == "postverify_rosalloc") { + xgc.verify_post_gc_rosalloc_ = true; + } else if (gc_option == "nopostverify_rosalloc") { + xgc.verify_post_gc_rosalloc_ = false; + } else if (gc_option == "gcstress") { + xgc.gcstress_ = true; + } else if (gc_option == "nogcstress") { + xgc.gcstress_ = false; + } else if (gc_option == "measure") { + xgc.measure_ = true; + } else if ((gc_option == "precise") || + (gc_option == "noprecise") || + (gc_option == "verifycardtable") || + (gc_option == "noverifycardtable")) { + // Ignored for backwards compatibility. + } else { + return Result::Usage(std::string("Unknown -Xgc option ") + gc_option); + } + } + + return Result::Success(std::move(xgc)); + } + + static const char* Name() { return "XgcOption"; } +}; + +struct BackgroundGcOption { + // If background_collector_type_ is kCollectorTypeNone, it defaults to the + // XGcOption::collector_type_ after parsing options. If you set this to + // kCollectorTypeHSpaceCompact then we will do an hspace compaction when + // we transition to background instead of a normal collector transition. 
+ gc::CollectorType background_collector_type_; + + BackgroundGcOption(gc::CollectorType background_collector_type) // NOLINT [runtime/explicit] [5] + : background_collector_type_(background_collector_type) {} + BackgroundGcOption() + : background_collector_type_(gc::kCollectorTypeNone) { + } + + operator gc::CollectorType() const { return background_collector_type_; } +}; + +template<> +struct CmdlineType + : CmdlineTypeParser, private BackgroundGcOption { + Result Parse(const std::string& substring) { + // Special handling for HSpaceCompact since this is only valid as a background GC type. + if (substring == "HSpaceCompact") { + background_collector_type_ = gc::kCollectorTypeHomogeneousSpaceCompact; + } else { + gc::CollectorType collector_type = ParseCollectorType(substring); + if (collector_type != gc::kCollectorTypeNone) { + background_collector_type_ = collector_type; + } else { + return Result::Failure(); + } + } + + BackgroundGcOption res = *this; + return Result::Success(res); + } + + static const char* Name() { return "BackgroundGcOption"; } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& options) { + LogVerbosity log_verbosity = LogVerbosity(); + + std::vector verbose_options; + Split(options, ',', &verbose_options); + for (size_t j = 0; j < verbose_options.size(); ++j) { + if (verbose_options[j] == "class") { + log_verbosity.class_linker = true; + } else if (verbose_options[j] == "collector") { + log_verbosity.collector = true; + } else if (verbose_options[j] == "compiler") { + log_verbosity.compiler = true; + } else if (verbose_options[j] == "deopt") { + log_verbosity.deopt = true; + } else if (verbose_options[j] == "gc") { + log_verbosity.gc = true; + } else if (verbose_options[j] == "heap") { + log_verbosity.heap = true; + } else if (verbose_options[j] == "interpreter") { + log_verbosity.interpreter = true; + } else if (verbose_options[j] == "jdwp") { + log_verbosity.jdwp = true; + } else if 
(verbose_options[j] == "jit") { + log_verbosity.jit = true; + } else if (verbose_options[j] == "jni") { + log_verbosity.jni = true; + } else if (verbose_options[j] == "monitor") { + log_verbosity.monitor = true; + } else if (verbose_options[j] == "oat") { + log_verbosity.oat = true; + } else if (verbose_options[j] == "profiler") { + log_verbosity.profiler = true; + } else if (verbose_options[j] == "signals") { + log_verbosity.signals = true; + } else if (verbose_options[j] == "simulator") { + log_verbosity.simulator = true; + } else if (verbose_options[j] == "startup") { + log_verbosity.startup = true; + } else if (verbose_options[j] == "third-party-jni") { + log_verbosity.third_party_jni = true; + } else if (verbose_options[j] == "threads") { + log_verbosity.threads = true; + } else if (verbose_options[j] == "verifier") { + log_verbosity.verifier = true; + } else if (verbose_options[j] == "verifier-debug") { + log_verbosity.verifier_debug = true; + } else if (verbose_options[j] == "image") { + log_verbosity.image = true; + } else if (verbose_options[j] == "systrace-locks") { + log_verbosity.systrace_lock_logging = true; + } else if (verbose_options[j] == "plugin") { + log_verbosity.plugin = true; + } else if (verbose_options[j] == "agents") { + log_verbosity.agents = true; + } else if (verbose_options[j] == "dex") { + log_verbosity.dex = true; + } else { + return Result::Usage(std::string("Unknown -verbose option ") + verbose_options[j]); + } + } + + return Result::Success(log_verbosity); + } + + static const char* Name() { return "LogVerbosity"; } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + using Result = CmdlineParseResult; + + private: + using StringResult = CmdlineParseResult; + using DoubleResult = CmdlineParseResult; + + template + static Result ParseInto(ProfileSaverOptions& options, + T ProfileSaverOptions::*pField, + CmdlineParseResult&& result) { + assert(pField != nullptr); + + if (result.IsSuccess()) { + options.*pField = 
result.ReleaseValue(); + return Result::SuccessNoValue(); + } + + return Result::CastError(result); + } + + static std::string RemovePrefix(const std::string& source) { + size_t prefix_idx = source.find(':'); + + if (prefix_idx == std::string::npos) { + return ""; + } + + return source.substr(prefix_idx + 1); + } + + public: + Result ParseAndAppend(const std::string& option, ProfileSaverOptions& existing) { + // Special case which doesn't include a wildcard argument definition. + // We pass-it through as-is. + if (option == "-Xjitsaveprofilinginfo") { + existing.enabled_ = true; + return Result::SuccessNoValue(); + } + + if (option == "profile-boot-class-path") { + existing.profile_boot_class_path_ = true; + return Result::SuccessNoValue(); + } + + if (option == "profile-aot-code") { + existing.profile_aot_code_ = true; + return Result::SuccessNoValue(); + } + + if (option == "save-without-jit-notifications") { + existing.wait_for_jit_notifications_to_save_ = false; + return Result::SuccessNoValue(); + } + + // The rest of these options are always the wildcard from '-Xps-*' + std::string suffix = RemovePrefix(option); + + if (android::base::StartsWith(option, "min-save-period-ms:")) { + CmdlineType type_parser; + return ParseInto(existing, + &ProfileSaverOptions::min_save_period_ms_, + type_parser.Parse(suffix)); + } + if (android::base::StartsWith(option, "save-resolved-classes-delay-ms:")) { + CmdlineType type_parser; + return ParseInto(existing, + &ProfileSaverOptions::save_resolved_classes_delay_ms_, + type_parser.Parse(suffix)); + } + if (android::base::StartsWith(option, "hot-startup-method-samples:")) { + CmdlineType type_parser; + return ParseInto(existing, + &ProfileSaverOptions::hot_startup_method_samples_, + type_parser.Parse(suffix)); + } + if (android::base::StartsWith(option, "min-methods-to-save:")) { + CmdlineType type_parser; + return ParseInto(existing, + &ProfileSaverOptions::min_methods_to_save_, + type_parser.Parse(suffix)); + } + if 
(android::base::StartsWith(option, "min-classes-to-save:")) { + CmdlineType type_parser; + return ParseInto(existing, + &ProfileSaverOptions::min_classes_to_save_, + type_parser.Parse(suffix)); + } + if (android::base::StartsWith(option, "min-notification-before-wake:")) { + CmdlineType type_parser; + return ParseInto(existing, + &ProfileSaverOptions::min_notification_before_wake_, + type_parser.Parse(suffix)); + } + if (android::base::StartsWith(option, "max-notification-before-wake:")) { + CmdlineType type_parser; + return ParseInto(existing, + &ProfileSaverOptions::max_notification_before_wake_, + type_parser.Parse(suffix)); + } + if (android::base::StartsWith(option, "profile-path:")) { + existing.profile_path_ = suffix; + return Result::SuccessNoValue(); + } + + return Result::Failure(std::string("Invalid suboption '") + option + "'"); + } + + static const char* Name() { return "ProfileSaverOptions"; } + static constexpr bool kCanParseBlankless = true; +}; + +template<> +struct CmdlineType : CmdlineTypeParser { + Result ParseAndAppend(const std::string& option, ExperimentalFlags& existing) { + if (option == "none") { + existing = ExperimentalFlags::kNone; + } else { + return Result::Failure(std::string("Unknown option '") + option + "'"); + } + return Result::SuccessNoValue(); + } + + static const char* Name() { return "ExperimentalFlags"; } +}; +} // namespace art +#endif // ART_CMDLINE_CMDLINE_TYPES_H_ diff --git a/cmdline/detail/cmdline_debug_detail.h b/cmdline/detail/cmdline_debug_detail.h new file mode 100644 index 0000000..e69d5dc --- /dev/null +++ b/cmdline/detail/cmdline_debug_detail.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_ +#define ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_ + +#include +#ifndef CMDLINE_NDEBUG +#define CMDLINE_DEBUG_LOG std::cerr +#else +#define CMDLINE_DEBUG_LOG ::art::detail::debug_log_ignore() +#endif + +namespace art { +// Implementation details for some template querying. Don't look inside if you hate templates. +namespace detail { +struct debug_log_ignore { + // Ignore most of the normal operator<< usage. + template + debug_log_ignore& operator<<(const T&) { return *this; } + // Ignore std::endl and the like. + debug_log_ignore& operator<<(std::ostream& (*)(std::ostream&) ) { return *this; } +}; +} // namespace detail // NOLINT [readability/namespace] [5] +} // namespace art + +#endif // ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_ diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h new file mode 100644 index 0000000..2155435 --- /dev/null +++ b/cmdline/detail/cmdline_parse_argument_detail.h @@ -0,0 +1,505 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_ +#define ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "android-base/strings.h" + +#include "cmdline_parse_result.h" +#include "cmdline_types.h" +#include "token_range.h" +#include "unit.h" + +namespace art { +// Implementation details for the parser. Do not look inside if you hate templates. +namespace detail { +// A non-templated base class for argument parsers. Used by the general parser +// to parse arguments, without needing to know the argument type at compile time. +// +// This is an application of the type erasure idiom. +struct CmdlineParseArgumentAny { + virtual ~CmdlineParseArgumentAny() {} + + // Attempt to parse this argument starting at arguments[position]. + // If the parsing succeeds, the parsed value will be saved as a side-effect. + // + // In most situations, the parsing will not match by returning kUnknown. In this case, + // no tokens were consumed and the position variable will not be updated. + // + // At other times, parsing may fail due to validation but the initial token was still matched + // (for example an out of range value, or passing in a string where an int was expected). + // In this case the tokens are still consumed, and the position variable will get incremented + // by all the consumed tokens. + // + // The # of tokens consumed by the parse attempt will be set as an out-parameter into + // consumed_tokens. The parser should skip this many tokens before parsing the next + // argument. + virtual CmdlineResult ParseArgument(const TokenRange& arguments, size_t* consumed_tokens) = 0; + // How many tokens should be taken off argv for parsing this argument. + // For example "--help" is just 1, "-compiler-option _" would be 2 (since there's a space). 
+ // + // A [min,max] range is returned to represent argument definitions with multiple + // value tokens. (e.g. {"-h", "-h " } would return [1,2]). + virtual std::pair GetNumTokens() const = 0; + // Get the run-time typename of the argument type. + virtual const char* GetTypeName() const = 0; + // Try to do a close match, returning how many tokens were matched against this argument + // definition. More tokens is better. + // + // Do a quick match token-by-token, and see if they match. + // Any tokens with a wildcard in them are only matched up until the wildcard. + // If this is true, then the wildcard matching later on can still fail, so this is not + // a guarantee that the argument is correct, it's more of a strong hint that the + // user-provided input *probably* was trying to match this argument. + // + // Returns how many tokens were either matched (or ignored because there was a + // wildcard present). 0 means no match. If the Size() tokens are returned. + virtual size_t MaybeMatches(const TokenRange& tokens) = 0; +}; + +template +using EnableIfNumeric = std::enable_if::value>; + +template +using DisableIfNumeric = std::enable_if::value>; + +// Argument definition information, created by an ArgumentBuilder and an UntypedArgumentBuilder. +template +struct CmdlineParserArgumentInfo { + // This version will only be used if TArg is arithmetic and thus has the <= operators. + template // Necessary to get SFINAE to kick in. + bool CheckRange(const TArg& value, typename EnableIfNumeric::type* = nullptr) { + if (has_range_) { + return min_ <= value && value <= max_; + } + return true; + } + + // This version will be used at other times when TArg is not arithmetic. + template + bool CheckRange(const TArg&, typename DisableIfNumeric::type* = nullptr) { + assert(!has_range_); + return true; + } + + // Do a quick match token-by-token, and see if they match. + // Any tokens with a wildcard in them only match the prefix up until the wildcard. 
+ // + // If this is true, then the wildcard matching later on can still fail, so this is not + // a guarantee that the argument is correct, it's more of a strong hint that the + // user-provided input *probably* was trying to match this argument. + size_t MaybeMatches(const TokenRange& token_list) const { + auto best_match = FindClosestMatch(token_list); + + return best_match.second; + } + + // Attempt to find the closest match (see MaybeMatches). + // + // Returns the token range that was the closest match and the # of tokens that + // this range was matched up until. + std::pair FindClosestMatch(const TokenRange& token_list) const { + const TokenRange* best_match_ptr = nullptr; + + size_t best_match = 0; + for (auto&& token_range : tokenized_names_) { + size_t this_match = token_range.MaybeMatches(token_list, std::string("_")); + + if (this_match > best_match) { + best_match_ptr = &token_range; + best_match = this_match; + } + } + + return std::make_pair(best_match_ptr, best_match); + } + + // Mark the argument definition as completed, do not mutate the object anymore after this + // call is done. + // + // Performs several sanity checks and token calculations. 
+ void CompleteArgument() { + assert(names_.size() >= 1); + assert(!is_completed_); + + is_completed_ = true; + + size_t blank_count = 0; + size_t token_count = 0; + + size_t global_blank_count = 0; + size_t global_token_count = 0; + for (auto&& name : names_) { + std::string s(name); + + size_t local_blank_count = std::count(s.begin(), s.end(), '_'); + size_t local_token_count = std::count(s.begin(), s.end(), ' '); + + if (global_blank_count != 0) { + assert(local_blank_count == global_blank_count + && "Every argument descriptor string must have same amount of blanks (_)"); + } + + if (local_blank_count != 0) { + global_blank_count = local_blank_count; + blank_count++; + + assert(local_blank_count == 1 && "More than one blank is not supported"); + assert(s.back() == '_' && "The blank character must only be at the end of the string"); + } + + if (global_token_count != 0) { + assert(local_token_count == global_token_count + && "Every argument descriptor string must have same amount of tokens (spaces)"); + } + + if (local_token_count != 0) { + global_token_count = local_token_count; + token_count++; + } + + // Tokenize every name, turning it from a string to a token list. + tokenized_names_.clear(); + for (auto&& name1 : names_) { + // Split along ' ' only, removing any duplicated spaces. + tokenized_names_.push_back( + TokenRange::Split(name1, {' '}).RemoveToken(" ")); + } + + // remove the _ character from each of the token ranges + // we will often end up with an empty token (i.e. 
["-XX", "_"] -> ["-XX", ""] + // and this is OK because we still need an empty token to simplify + // range comparisons + simple_names_.clear(); + + for (auto&& tokenized_name : tokenized_names_) { + simple_names_.push_back(tokenized_name.RemoveCharacter('_')); + } + } + + if (token_count != 0) { + assert(("Every argument descriptor string must have equal amount of tokens (spaces)" && + token_count == names_.size())); + } + + if (blank_count != 0) { + assert(("Every argument descriptor string must have an equal amount of blanks (_)" && + blank_count == names_.size())); + } + + using_blanks_ = blank_count > 0; + { + size_t smallest_name_token_range_size = + std::accumulate(tokenized_names_.begin(), tokenized_names_.end(), ~(0u), + [](size_t min, const TokenRange& cur) { + return std::min(min, cur.Size()); + }); + size_t largest_name_token_range_size = + std::accumulate(tokenized_names_.begin(), tokenized_names_.end(), 0u, + [](size_t max, const TokenRange& cur) { + return std::max(max, cur.Size()); + }); + + token_range_size_ = std::make_pair(smallest_name_token_range_size, + largest_name_token_range_size); + } + + if (has_value_list_) { + assert(names_.size() == value_list_.size() + && "Number of arg descriptors must match number of values"); + assert(!has_value_map_); + } + if (has_value_map_) { + if (!using_blanks_) { + assert(names_.size() == value_map_.size() && + "Since no blanks were specified, each arg is mapped directly into a mapped " + "value without parsing; sizes must match"); + } + + assert(!has_value_list_); + } + + if (!using_blanks_ && !CmdlineType::kCanParseBlankless) { + assert((has_value_map_ || has_value_list_) && + "Arguments without a blank (_) must provide either a value map or a value list"); + } + + TypedCheck(); + } + + // List of aliases for a single argument definition, e.g. {"-Xdex2oat", "-Xnodex2oat"}. + std::vector names_; + // Is there at least 1 wildcard '_' in the argument definition? 
+ bool using_blanks_ = false; + // [min, max] token counts in each arg def + std::pair token_range_size_; + + // contains all the names in a tokenized form, i.e. as a space-delimited list + std::vector tokenized_names_; + + // contains the tokenized names, but with the _ character stripped + std::vector simple_names_; + + // For argument definitions created with '.AppendValues()' + // Meaning that parsing should mutate the existing value in-place if possible. + bool appending_values_ = false; + + // For argument definitions created with '.WithRange(min, max)' + bool has_range_ = false; + TArg min_; + TArg max_; + + // For argument definitions created with '.WithValueMap' + bool has_value_map_ = false; + std::vector> value_map_; + + // For argument definitions created with '.WithValues' + bool has_value_list_ = false; + std::vector value_list_; + + // Make sure there's a default constructor. + CmdlineParserArgumentInfo() = default; + + // Ensure there's a default move constructor. + CmdlineParserArgumentInfo(CmdlineParserArgumentInfo&&) = default; + + private: + // Perform type-specific checks at runtime. + template + void TypedCheck(typename std::enable_if::value>::type* = 0) { + assert(!using_blanks_ && + "Blanks are not supported in Unit arguments; since a Unit has no parse-able value"); + } + + void TypedCheck() {} + + bool is_completed_ = false; +}; + +// A virtual-implementation of the necessary argument information in order to +// be able to parse arguments. 
+template +struct CmdlineParseArgument : CmdlineParseArgumentAny { + CmdlineParseArgument(CmdlineParserArgumentInfo&& argument_info, + std::function&& save_argument, + std::function&& load_argument) + : argument_info_(std::forward(argument_info)), + save_argument_(std::forward(save_argument)), + load_argument_(std::forward(load_argument)) { + } + + using UserTypeInfo = CmdlineType; + + virtual CmdlineResult ParseArgument(const TokenRange& arguments, size_t* consumed_tokens) { + assert(arguments.Size() > 0); + assert(consumed_tokens != nullptr); + + auto closest_match_res = argument_info_.FindClosestMatch(arguments); + size_t best_match_size = closest_match_res.second; + const TokenRange* best_match_arg_def = closest_match_res.first; + + if (best_match_size > arguments.Size()) { + // The best match has more tokens than were provided. + // Shouldn't happen in practice since the outer parser does this check. + return CmdlineResult(CmdlineResult::kUnknown, "Size mismatch"); + } + + assert(best_match_arg_def != nullptr); + *consumed_tokens = best_match_arg_def->Size(); + + if (!argument_info_.using_blanks_) { + return ParseArgumentSingle(arguments.Join(' ')); + } + + // Extract out the blank value from arguments + // e.g. for a def of "foo:_" and input "foo:bar", blank_value == "bar" + std::string blank_value = ""; + size_t idx = 0; + for (auto&& def_token : *best_match_arg_def) { + auto&& arg_token = arguments[idx]; + + // Does this definition-token have a wildcard in it? + if (def_token.find('_') == std::string::npos) { + // No, regular token. Match 1:1 against the argument token. + bool token_match = def_token == arg_token; + + if (!token_match) { + return CmdlineResult(CmdlineResult::kFailure, + std::string("Failed to parse ") + best_match_arg_def->GetToken(0) + + " at token " + std::to_string(idx)); + } + } else { + // This is a wild-carded token. 
+ TokenRange def_split_wildcards = TokenRange::Split(def_token, {'_'}); + + // Extract the wildcard contents out of the user-provided arg_token. + std::unique_ptr arg_matches = + def_split_wildcards.MatchSubstrings(arg_token, "_"); + if (arg_matches == nullptr) { + return CmdlineResult(CmdlineResult::kFailure, + std::string("Failed to parse ") + best_match_arg_def->GetToken(0) + + ", with a wildcard pattern " + def_token + + " at token " + std::to_string(idx)); + } + + // Get the corresponding wildcard tokens from arg_matches, + // and concatenate it to blank_value. + for (size_t sub_idx = 0; + sub_idx < def_split_wildcards.Size() && sub_idx < arg_matches->Size(); ++sub_idx) { + if (def_split_wildcards[sub_idx] == "_") { + blank_value += arg_matches->GetToken(sub_idx); + } + } + } + + ++idx; + } + + return ParseArgumentSingle(blank_value); + } + + private: + virtual CmdlineResult ParseArgumentSingle(const std::string& argument) { + // TODO: refactor to use LookupValue for the value lists/maps + + // Handle the 'WithValueMap(...)' argument definition + if (argument_info_.has_value_map_) { + for (auto&& value_pair : argument_info_.value_map_) { + const char* name = value_pair.first; + + if (argument == name) { + return SaveArgument(value_pair.second); + } + } + + // Error case: Fail, telling the user what the allowed values were. 
+ std::vector allowed_values; + for (auto&& value_pair : argument_info_.value_map_) { + const char* name = value_pair.first; + allowed_values.push_back(name); + } + + std::string allowed_values_flat = android::base::Join(allowed_values, ','); + return CmdlineResult(CmdlineResult::kFailure, + "Argument value '" + argument + "' does not match any of known valid " + "values: {" + allowed_values_flat + "}"); + } + + // Handle the 'WithValues(...)' argument definition + if (argument_info_.has_value_list_) { + size_t arg_def_idx = 0; + for (auto&& value : argument_info_.value_list_) { + auto&& arg_def_token = argument_info_.names_[arg_def_idx]; + + if (arg_def_token == argument) { + return SaveArgument(value); + } + ++arg_def_idx; + } + + assert(arg_def_idx + 1 == argument_info_.value_list_.size() && + "Number of named argument definitions must match number of values defined"); + + // Error case: Fail, telling the user what the allowed values were. + std::vector allowed_values; + for (auto&& arg_name : argument_info_.names_) { + allowed_values.push_back(arg_name); + } + + std::string allowed_values_flat = android::base::Join(allowed_values, ','); + return CmdlineResult(CmdlineResult::kFailure, + "Argument value '" + argument + "' does not match any of known valid" + "values: {" + allowed_values_flat + "}"); + } + + // Handle the regular case where we parsed an unknown value from a blank. + UserTypeInfo type_parser; + + if (argument_info_.appending_values_) { + TArg& existing = load_argument_(); + CmdlineParseResult result = type_parser.ParseAndAppend(argument, existing); + + assert(!argument_info_.has_range_); + + return std::move(result); + } + + CmdlineParseResult result = type_parser.Parse(argument); + + if (result.IsSuccess()) { + TArg& value = result.GetValue(); + + // Do a range check for 'WithRange(min,max)' argument definition. 
+ if (!argument_info_.CheckRange(value)) { + return CmdlineParseResult::OutOfRange( + value, argument_info_.min_, argument_info_.max_); + } + + return SaveArgument(value); + } + + // Some kind of type-specific parse error. Pass the result as-is. + CmdlineResult raw_result = std::move(result); + return raw_result; + } + + public: + virtual const char* GetTypeName() const { + // TODO: Obviate the need for each type specialization to hardcode the type name + return UserTypeInfo::Name(); + } + + // How many tokens should be taken off argv for parsing this argument. + // For example "--help" is just 1, "-compiler-option _" would be 2 (since there's a space). + // + // A [min,max] range is returned to represent argument definitions with multiple + // value tokens. (e.g. {"-h", "-h " } would return [1,2]). + virtual std::pair GetNumTokens() const { + return argument_info_.token_range_size_; + } + + // See if this token range might begin the same as the argument definition. + virtual size_t MaybeMatches(const TokenRange& tokens) { + return argument_info_.MaybeMatches(tokens); + } + + private: + CmdlineResult SaveArgument(const TArg& value) { + assert(!argument_info_.appending_values_ + && "If the values are being appended, then the updated parse value is " + "updated by-ref as a side effect and shouldn't be stored directly"); + TArg val = value; + save_argument_(val); + return CmdlineResult(CmdlineResult::kSuccess); + } + + CmdlineParserArgumentInfo argument_info_; + std::function save_argument_; + std::function load_argument_; +}; +} // namespace detail // NOLINT [readability/namespace] [5] +} // namespace art + +#endif // ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_ diff --git a/cmdline/detail/cmdline_parser_detail.h b/cmdline/detail/cmdline_parser_detail.h new file mode 100644 index 0000000..2078d7a --- /dev/null +++ b/cmdline/detail/cmdline_parser_detail.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_ +#define ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_ + +#include +#include +#include + +namespace art { +// Implementation details for some template querying. Don't look inside if you hate templates. +namespace detail { +template +typename std::remove_reference::type& FakeReference(); + +// SupportsInsertionOperator::value will evaluate to a boolean, +// whose value is true if the TStream class supports the << operator against T, +// and false otherwise. +template +struct SupportsInsertionOperator { + private: + template + static std::true_type InsertionOperatorTest(TStream& os, const T& value, + std::remove_reference* = 0); // NOLINT [whitespace/operators] [3] + + template + static std::false_type InsertionOperatorTest(TStream& os, const T& ... args); + + public: + static constexpr bool value = + decltype(InsertionOperatorTest(FakeReference(), std::declval()))::value; +}; + +template +struct SupportsEqualityOperatorImpl; + +template +struct SupportsEqualityOperatorImpl { + private: + template + static std::true_type EqualityOperatorTest(const TL& left, const TR& right, + std::remove_reference* = 0); // NOLINT [whitespace/operators] [3] + + template + static std::false_type EqualityOperatorTest(const TL& left, const T& ... 
args); + + public: + static constexpr bool value = + decltype(EqualityOperatorTest(std::declval(), std::declval()))::value; +}; + +// Partial specialization when TLeft/TRight are both floating points. +// This is a work-around because decltype(floatvar1 == floatvar2) +// will not compile with clang: +// error: comparing floating point with == or != is unsafe [-Werror,-Wfloat-equal] +template +struct SupportsEqualityOperatorImpl { + static constexpr bool value = true; +}; + +// SupportsEqualityOperatorImpl::value will evaluate to a boolean, +// whose value is true if T1 can be compared against T2 with ==, +// and false otherwise. +template +struct SupportsEqualityOperator : // NOLINT [whitespace/labels] [4] + SupportsEqualityOperatorImpl::value + && std::is_floating_point::value> { +}; + +// Convert any kind of type to an std::string, even if there's no +// serialization support for it. Unknown types get converted to an +// an arbitrary value. +// +// Meant for printing user-visible errors or unit test failures only. 
+template +std::string ToStringAny(const T& value, + typename std::enable_if< + SupportsInsertionOperator::value>::type* = nullptr) { + std::stringstream stream; + stream << value; + return stream.str(); +} + +template +std::string ToStringAny(const std::vector value, + typename std::enable_if< + SupportsInsertionOperator::value>::type* = nullptr) { + std::stringstream stream; + stream << "vector{"; + + for (size_t i = 0; i < value.size(); ++i) { + stream << ToStringAny(value[i]); + + if (i != value.size() - 1) { + stream << ','; + } + } + + stream << "}"; + return stream.str(); +} + +template +std::string ToStringAny(const T&, + typename std::enable_if< + !SupportsInsertionOperator::value>::type* = nullptr +) { + return std::string("(unknown type [no operator<< implemented] for )"); +} +} // namespace detail // NOLINT [readability/namespace] [5] +} // namespace art + +#endif // ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_ diff --git a/cmdline/memory_representation.h b/cmdline/memory_representation.h new file mode 100644 index 0000000..8db68bc --- /dev/null +++ b/cmdline/memory_representation.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_MEMORY_REPRESENTATION_H_ +#define ART_CMDLINE_MEMORY_REPRESENTATION_H_ + +#include +#include +#include + +#include "base/bit_utils.h" + +namespace art { + +// An integral representation of bytes of memory. 
+// The underlying runtime size_t value is guaranteed to be a multiple of Divisor. +template +struct Memory { + static_assert(IsPowerOfTwo(kDivisor), "Divisor must be a power of 2"); + + static Memory FromBytes(size_t bytes) { + assert(bytes % kDivisor == 0); + return Memory(bytes); + } + + Memory() : Value(0u) {} + Memory(size_t value) : Value(value) { // NOLINT [runtime/explicit] [5] + assert(value % kDivisor == 0); + } + operator size_t() const { return Value; } + + size_t ToBytes() const { + return Value; + } + + static const char* Name() { + static std::string str; + if (str.empty()) { + str = "Memory<" + std::to_string(kDivisor) + '>'; + } + + return str.c_str(); + } + + size_t Value; +}; + +template +std::ostream& operator<<(std::ostream& stream, Memory memory) { + return stream << memory.Value << '*' << kDivisor; +} + +using MemoryKiB = Memory<1024>; + +} // namespace art + +#endif // ART_CMDLINE_MEMORY_REPRESENTATION_H_ diff --git a/cmdline/token_range.h b/cmdline/token_range.h new file mode 100644 index 0000000..e28ead9 --- /dev/null +++ b/cmdline/token_range.h @@ -0,0 +1,427 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_TOKEN_RANGE_H_ +#define ART_CMDLINE_TOKEN_RANGE_H_ + +#include +#include +#include +#include +#include + +#include "android-base/strings.h" + +namespace art { +// A range of tokens to make token matching algorithms easier. 
+// +// We try really hard to avoid copying and store only a pointer and iterators to the +// interiors of the vector, so a typical copy constructor never ends up doing a deep copy. +// It is up to the user to play nice and not to mutate the strings in-place. +// +// Tokens are only copied if a mutating operation is performed (and even then only +// if it *actually* mutates the token). +struct TokenRange { + // Short-hand for a vector of strings. A single string and a token is synonymous. + using TokenList = std::vector; + + // Copying-from-vector constructor. + explicit TokenRange(const TokenList& token_list) + : token_list_(new TokenList(token_list)), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Copying-from-iterator constructor + template + TokenRange(ForwardIterator it_begin, ForwardIterator it_end) + : token_list_(new TokenList(it_begin, it_end)), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + +#if 0 + // Copying-from-vector constructor. + TokenRange(const TokenList& token_list ATTRIBUTE_UNUSED, + TokenList::const_iterator it_begin, + TokenList::const_iterator it_end) + : token_list_(new TokenList(it_begin, it_end)), + begin_(token_list_->begin()), + end_(token_list_->end()) { + assert(it_begin >= token_list.begin()); + assert(it_end <= token_list.end()); + } +#endif + + // Copying from char array constructor, convertings into tokens (strings) along the way. + TokenRange(const char* token_list[], size_t length) + : token_list_(new TokenList(&token_list[0], &token_list[length])), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Non-copying move-from-vector constructor. Takes over the token vector. + explicit TokenRange(TokenList&& token_list) + : token_list_(new TokenList(std::forward(token_list))), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Non-copying constructor. Retain reference to existing list of tokens. 
+ TokenRange(std::shared_ptr token_list, + TokenList::const_iterator it_begin, + TokenList::const_iterator it_end) + : token_list_(token_list), + begin_(it_begin), + end_(it_end) { + assert(it_begin >= token_list->begin()); + assert(it_end <= token_list->end()); + } + + // Non-copying copy constructor. + TokenRange(const TokenRange&) = default; + + // Non-copying move constructor. + TokenRange(TokenRange&&) = default; + + // Non-copying constructor. Retains reference to an existing list of tokens, with offset. + explicit TokenRange(std::shared_ptr token_list) + : token_list_(token_list), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Iterator type for begin() and end(). Guaranteed to be a RandomAccessIterator. + using iterator = TokenList::const_iterator; + + // Iterator type for const begin() and const end(). Guaranteed to be a RandomAccessIterator. + using const_iterator = iterator; + + // Create a token range by splitting a string. Each separator gets their own token. + // Since the separator are retained as tokens, it might be useful to call + // RemoveToken afterwards. + static TokenRange Split(const std::string& string, std::initializer_list separators) { + TokenList new_token_list; + + std::string tok; + for (auto&& c : string) { + for (char sep : separators) { + if (c == sep) { + // We spotted a separator character. + // Push back everything before the last separator as a new token. + // Push back the separator as a token. + if (!tok.empty()) { + new_token_list.push_back(tok); + tok = ""; + } + new_token_list.push_back(std::string() + sep); + } else { + // Build up the token with another character. + tok += c; + } + } + } + + if (!tok.empty()) { + new_token_list.push_back(tok); + } + + return TokenRange(std::move(new_token_list)); + } + + // A RandomAccessIterator to the first element in this range. + iterator begin() const { + return begin_; + } + + // A RandomAccessIterator to one past the last element in this range. 
+ iterator end() const { + return end_; + } + + // The size of the range, i.e. how many tokens are in it. + size_t Size() const { + return std::distance(begin_, end_); + } + + // Are there 0 tokens in this range? + bool IsEmpty() const { + return Size() > 0; + } + + // Look up a token by it's offset. + const std::string& GetToken(size_t offset) const { + assert(offset < Size()); + return *(begin_ + offset); + } + + // Does this token range equal the other range? + // Equality is defined as having both the same size, and + // each corresponding token being equal. + bool operator==(const TokenRange& other) const { + if (this == &other) { + return true; + } + + if (Size() != other.Size()) { + return false; + } + + return std::equal(begin(), end(), other.begin()); + } + + // Look up the token at the requested index. + const std::string& operator[](int index) const { + assert(index >= 0 && static_cast(index) < Size()); + return *(begin() + index); + } + + // Does this current range start with the other range? + bool StartsWith(const TokenRange& other) const { + if (this == &other) { + return true; + } + + if (Size() < other.Size()) { + return false; + } + + auto& smaller = Size() < other.Size() ? *this : other; + auto& greater = Size() < other.Size() ? other : *this; + + return std::equal(smaller.begin(), smaller.end(), greater.begin()); + } + + // Remove all characters 'c' from each token, potentially copying the underlying tokens. + TokenRange RemoveCharacter(char c) const { + TokenList new_token_list(begin(), end()); + + bool changed = false; + for (auto&& token : new_token_list) { + auto it = std::remove_if(token.begin(), token.end(), [&](char ch) { + if (ch == c) { + changed = true; + return true; + } + return false; + }); + token.erase(it, token.end()); + } + + if (!changed) { + return *this; + } + + return TokenRange(std::move(new_token_list)); + } + + // Remove all tokens matching this one, potentially copying the underlying tokens. 
+ TokenRange RemoveToken(const std::string& token) { + return RemoveIf([&](const std::string& tok) { return tok == token; }); + } + + // Discard all empty tokens, potentially copying the underlying tokens. + TokenRange DiscardEmpty() const { + return RemoveIf([](const std::string& token) { return token.empty(); }); + } + + // Create a non-copying subset of this range. + // Length is trimmed so that the Slice does not go out of range. + TokenRange Slice(size_t offset, size_t length = std::string::npos) const { + assert(offset < Size()); + + if (length != std::string::npos && offset + length > Size()) { + length = Size() - offset; + } + + iterator it_end; + if (length == std::string::npos) { + it_end = end(); + } else { + it_end = begin() + offset + length; + } + + return TokenRange(token_list_, begin() + offset, it_end); + } + + // Try to match the string with tokens from this range. + // Each token is used to match exactly once (after which the next token is used, and so on). + // The matching happens from left-to-right in a non-greedy fashion. + // If the currently-matched token is the wildcard, then the new outputted token will + // contain as much as possible until the next token is matched. + // + // For example, if this == ["a:", "_", "b:] and "_" is the match string, then + // MatchSubstrings on "a:foob:" will yield: ["a:", "foo", "b:"] + // + // Since the string matching can fail (e.g. ["foo"] against "bar"), then this + // function can fail, in which cause it will return null. + std::unique_ptr MatchSubstrings(const std::string& string, + const std::string& wildcard) const { + TokenList new_token_list; + + size_t wildcard_idx = std::string::npos; + size_t string_idx = 0; + + // Function to push all the characters matched as a wildcard so far + // as a brand new token. It resets the wildcard matching. + // Empty wildcards are possible and ok, but only if wildcard matching was on. 
+ auto maybe_push_wildcard_token = [&]() { + if (wildcard_idx != std::string::npos) { + size_t wildcard_length = string_idx - wildcard_idx; + std::string wildcard_substr = string.substr(wildcard_idx, wildcard_length); + new_token_list.push_back(std::move(wildcard_substr)); + + wildcard_idx = std::string::npos; + } + }; + + for (iterator it = begin(); it != end(); ++it) { + const std::string& tok = *it; + + if (tok == wildcard) { + maybe_push_wildcard_token(); + wildcard_idx = string_idx; + continue; + } + + size_t next_token_idx = string.find(tok); + if (next_token_idx == std::string::npos) { + // Could not find token at all + return nullptr; + } else if (next_token_idx != string_idx && wildcard_idx == std::string::npos) { + // Found the token at a non-starting location, and we weren't + // trying to parse the wildcard. + return nullptr; + } + + new_token_list.push_back(string.substr(next_token_idx, tok.size())); + maybe_push_wildcard_token(); + string_idx += tok.size(); + } + + size_t remaining = string.size() - string_idx; + if (remaining > 0) { + if (wildcard_idx == std::string::npos) { + // Some characters were still remaining in the string, + // but it wasn't trying to match a wildcard. + return nullptr; + } + } + + // If some characters are remaining, the rest must be a wildcard. + string_idx += remaining; + maybe_push_wildcard_token(); + + return std::make_unique(std::move(new_token_list)); + } + + // Do a quick match token-by-token, and see if they match. + // Any tokens with a wildcard in them are only matched up until the wildcard. + // If this is true, then the wildcard matching later on can still fail, so this is not + // a guarantee that the argument is correct, it's more of a strong hint that the + // user-provided input *probably* was trying to match this argument. + // + // Returns how many tokens were either matched (or ignored because there was a + // wildcard present). 0 means no match. If the size() tokens are returned. 
+ size_t MaybeMatches(const TokenRange& token_list, const std::string& wildcard) const { + auto token_it = token_list.begin(); + auto token_end = token_list.end(); + auto name_it = begin(); + auto name_end = end(); + + size_t matched_tokens = 0; + + while (token_it != token_end && name_it != name_end) { + // Skip token matching when the corresponding name has a wildcard in it. + const std::string& name = *name_it; + + size_t wildcard_idx = name.find(wildcard); + if (wildcard_idx == std::string::npos) { // No wildcard present + // Did the definition token match the user token? + if (name != *token_it) { + return matched_tokens; + } + } else { + std::string name_prefix = name.substr(0, wildcard_idx); + + // Did the user token start with the up-to-the-wildcard prefix? + if (!StartsWith(*token_it, name_prefix)) { + return matched_tokens; + } + } + + ++token_it; + ++name_it; + ++matched_tokens; + } + + // If we got this far, it's either a full match or the token list was too short. + return matched_tokens; + } + + // Flatten the token range by joining every adjacent token with the separator character. + // e.g. ["hello", "world"].join('$') == "hello$world" + std::string Join(char separator) const { + TokenList tmp(begin(), end()); + return android::base::Join(tmp, separator); + // TODO: Join should probably take an offset or iterators + } + + private: + static bool StartsWith(const std::string& larger, const std::string& smaller) { + if (larger.size() >= smaller.size()) { + return std::equal(smaller.begin(), smaller.end(), larger.begin()); + } + + return false; + } + + template + TokenRange RemoveIf(const TPredicate& predicate) const { + // If any of the tokens in the token lists are empty, then + // we need to remove them and compress the token list into a smaller one. 
+ bool remove = false; + for (auto it = begin_; it != end_; ++it) { + auto&& token = *it; + + if (predicate(token)) { + remove = true; + break; + } + } + + // Actually copy the token list and remove the tokens that don't match our predicate. + if (remove) { + auto token_list = std::make_shared(begin(), end()); + TokenList::iterator new_end = + std::remove_if(token_list->begin(), token_list->end(), predicate); + token_list->erase(new_end, token_list->end()); + + assert(token_list_->size() > token_list->size() && "Nothing was actually removed!"); + + return TokenRange(token_list); + } + + return *this; + } + + const std::shared_ptr> token_list_; + const iterator begin_; + const iterator end_; +}; +} // namespace art + +#endif // ART_CMDLINE_TOKEN_RANGE_H_ diff --git a/cmdline/unit.h b/cmdline/unit.h new file mode 100644 index 0000000..f73981f --- /dev/null +++ b/cmdline/unit.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_UNIT_H_ +#define ART_CMDLINE_UNIT_H_ + +namespace art { + +// Used for arguments that simply indicate presence (e.g. "-help") without any values. +struct Unit { + // Historical note: We specified a user-defined constructor to avoid + // 'Conditional jump or move depends on uninitialised value(s)' errors + // when running Valgrind. 
+ Unit() {} + Unit(const Unit&) = default; + ~Unit() {} + bool operator==(Unit) const { + return true; + } +}; + +} // namespace art + +#endif // ART_CMDLINE_UNIT_H_ diff --git a/compiler/Android.bp b/compiler/Android.bp new file mode 100644 index 0000000..cbfff89 --- /dev/null +++ b/compiler/Android.bp @@ -0,0 +1,490 @@ +// +// Copyright (C) 2012 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// TODO We should really separate out those files that are actually needed for both variants of an +// architecture into its own category. Currently we just include all of the 32bit variant in the +// 64bit variant. It also might be good to allow one to compile only the 64bit variant without the +// 32bit one. 
+ +art_cc_defaults { + name: "libart-compiler-defaults", + defaults: ["art_defaults"], + host_supported: true, + srcs: [ + "compiled_method.cc", + "debug/elf_debug_writer.cc", + "dex/inline_method_analyser.cc", + "dex/verified_method.cc", + "dex/verification_results.cc", + "driver/compiled_method_storage.cc", + "driver/compiler_options.cc", + "driver/dex_compilation_unit.cc", + "jit/jit_compiler.cc", + "jit/jit_logger.cc", + "jni/quick/calling_convention.cc", + "jni/quick/jni_compiler.cc", + "optimizing/block_builder.cc", + "optimizing/bounds_check_elimination.cc", + "optimizing/builder.cc", + "optimizing/cha_guard_optimization.cc", + "optimizing/code_generator.cc", + "optimizing/code_generator_utils.cc", + "optimizing/code_sinking.cc", + "optimizing/constant_folding.cc", + "optimizing/constructor_fence_redundancy_elimination.cc", + "optimizing/data_type.cc", + "optimizing/dead_code_elimination.cc", + "optimizing/escape.cc", + "optimizing/graph_checker.cc", + "optimizing/graph_visualizer.cc", + "optimizing/gvn.cc", + "optimizing/induction_var_analysis.cc", + "optimizing/induction_var_range.cc", + "optimizing/inliner.cc", + "optimizing/instruction_builder.cc", + "optimizing/instruction_simplifier.cc", + "optimizing/intrinsic_objects.cc", + "optimizing/intrinsics.cc", + "optimizing/licm.cc", + "optimizing/linear_order.cc", + "optimizing/load_store_analysis.cc", + "optimizing/load_store_elimination.cc", + "optimizing/locations.cc", + "optimizing/loop_analysis.cc", + "optimizing/loop_optimization.cc", + "optimizing/nodes.cc", + "optimizing/optimization.cc", + "optimizing/optimizing_compiler.cc", + "optimizing/parallel_move_resolver.cc", + "optimizing/prepare_for_register_allocation.cc", + "optimizing/reference_type_propagation.cc", + "optimizing/register_allocation_resolver.cc", + "optimizing/register_allocator.cc", + "optimizing/register_allocator_graph_color.cc", + "optimizing/register_allocator_linear_scan.cc", + "optimizing/select_generator.cc", + 
"optimizing/scheduler.cc", + "optimizing/sharpening.cc", + "optimizing/side_effects_analysis.cc", + "optimizing/ssa_builder.cc", + "optimizing/ssa_liveness_analysis.cc", + "optimizing/ssa_phi_elimination.cc", + "optimizing/stack_map_stream.cc", + "optimizing/superblock_cloner.cc", + "trampolines/trampoline_compiler.cc", + "utils/assembler.cc", + "utils/jni_macro_assembler.cc", + "utils/swap_space.cc", + "compiler.cc", + ], + + codegen: { + arm: { + srcs: [ + "jni/quick/arm/calling_convention_arm.cc", + "optimizing/code_generator_arm_vixl.cc", + "optimizing/code_generator_vector_arm_vixl.cc", + "optimizing/instruction_simplifier_arm.cc", + "optimizing/instruction_simplifier_shared.cc", + "optimizing/intrinsics_arm_vixl.cc", + "optimizing/nodes_shared.cc", + "optimizing/scheduler_arm.cc", + "utils/arm/assembler_arm_vixl.cc", + "utils/arm/constants_arm.cc", + "utils/arm/jni_macro_assembler_arm_vixl.cc", + "utils/arm/managed_register_arm.cc", + ], + }, + arm64: { + srcs: [ + "jni/quick/arm64/calling_convention_arm64.cc", + "optimizing/code_generator_arm64.cc", + "optimizing/code_generator_vector_arm64.cc", + "optimizing/scheduler_arm64.cc", + "optimizing/instruction_simplifier_arm64.cc", + "optimizing/intrinsics_arm64.cc", + "utils/arm64/assembler_arm64.cc", + "utils/arm64/jni_macro_assembler_arm64.cc", + "utils/arm64/managed_register_arm64.cc", + ], + }, + x86: { + srcs: [ + "jni/quick/x86/calling_convention_x86.cc", + "optimizing/code_generator_x86.cc", + "optimizing/code_generator_vector_x86.cc", + "optimizing/intrinsics_x86.cc", + "optimizing/instruction_simplifier_x86_shared.cc", + "optimizing/instruction_simplifier_x86.cc", + "optimizing/pc_relative_fixups_x86.cc", + "optimizing/x86_memory_gen.cc", + "utils/x86/assembler_x86.cc", + "utils/x86/jni_macro_assembler_x86.cc", + "utils/x86/managed_register_x86.cc", + ], + }, + x86_64: { + srcs: [ + "jni/quick/x86_64/calling_convention_x86_64.cc", + "optimizing/intrinsics_x86_64.cc", + 
"optimizing/instruction_simplifier_x86_64.cc", + "optimizing/code_generator_x86_64.cc", + "optimizing/code_generator_vector_x86_64.cc", + "utils/x86_64/assembler_x86_64.cc", + "utils/x86_64/jni_macro_assembler_x86_64.cc", + "utils/x86_64/managed_register_x86_64.cc", + ], + }, + }, + generated_sources: ["art_compiler_operator_srcs"], + shared_libs: [ + "libbase", + ], + header_libs: [ + "art_cmdlineparser_headers", // For compiler_options. + "art_disassembler_headers", + "libnativehelper_header_only", + ], + + export_include_dirs: ["."], +} + +cc_defaults { + name: "libart-compiler_static_base_defaults", + static_libs: [ + "libbase", + ], +} + +gensrcs { + name: "art_compiler_operator_srcs", + cmd: "$(location generate_operator_out) art/compiler $(in) > $(out)", + tools: ["generate_operator_out"], + srcs: [ + "driver/compiler_options.h", + "linker/linker_patch.h", + "optimizing/locations.h", + "optimizing/optimizing_compiler_stats.h", + + "utils/arm/constants_arm.h", + ], + output_extension: "operator_out.cc", +} + +art_cc_library { + name: "libart-compiler", + defaults: [ + "libart-compiler-defaults", + "dex2oat-pgo-defaults", + ], + codegen: { + arm: { + // VIXL assembly support for ARM targets. + static: { + whole_static_libs: [ + "libvixl", + ], + }, + shared: { + shared_libs: [ + "libvixl", + ], + // Export vixl headers as they are included in this library's exported headers. + export_shared_lib_headers: [ + "libvixl", + ], + }, + }, + arm64: { + // VIXL assembly support for ARM64 targets. + static: { + whole_static_libs: [ + "libvixl", + ], + }, + shared: { + shared_libs: [ + "libvixl", + ], + // Export vixl headers as they are included in this library's exported headers. 
+ export_shared_lib_headers: [ + "libvixl", + ], + }, + }, + }, + shared_libs: [ + "libart", + "libartbase", + "libartpalette", + "libprofile", + "libdexfile", + ], + whole_static_libs: ["libelffile"], + runtime_libs: [ + // `art::HGraphVisualizerDisassembler::HGraphVisualizerDisassembler` may dynamically load + // `libart-disassembler.so`. + "libart-disassembler", + ], + + target: { + android: { + lto: { + thin: true, + }, + }, + }, + apex_available: [ + "com.android.art.release", + "com.android.art.debug", + ], +} + +cc_defaults { + name: "libart-compiler_static_defaults", + defaults: [ + "libart-compiler_static_base_defaults", + "libart_static_defaults", + "libartbase_static_defaults", + "libdexfile_static_defaults", + "libprofile_static_defaults", + ], + static_libs: ["libart-compiler"], +} + +art_cc_library { + name: "libartd-compiler", + defaults: [ + "art_debug_defaults", + "libart-compiler-defaults", + ], + codegen: { + arm: { + // VIXL assembly support for ARM targets. + static: { + whole_static_libs: [ + "libvixld", + ], + }, + shared: { + shared_libs: [ + "libvixld", + ], + // Export vixl headers as they are included in this library's exported headers. + export_shared_lib_headers: [ + "libvixld", + ], + }, + }, + arm64: { + // VIXL assembly support for ARM64 targets. + static: { + whole_static_libs: [ + "libvixld", + ], + }, + shared: { + shared_libs: [ + "libvixld", + ], + // Export vixl headers as they are included in this library's exported headers. + export_shared_lib_headers: [ + "libvixld", + ], + }, + }, + }, + shared_libs: [ + "libartbased", + "libartd", + "libartpalette", + "libprofiled", + "libdexfiled", + ], + whole_static_libs: ["libelffiled"], + runtime_libs: [ + // `art::HGraphVisualizerDisassembler::HGraphVisualizerDisassembler` may dynamically load + // `libartd-disassembler.so`. 
+ "libartd-disassembler", + ], + + apex_available: [ + "com.android.art.debug", + ], +} + +cc_defaults { + name: "libartd-compiler_static_defaults", + defaults: [ + "libart-compiler_static_base_defaults", + "libartd_static_defaults", + "libartbased_static_defaults", + "libdexfiled_static_defaults", + "libprofiled_static_defaults", + ], + static_libs: ["libartd-compiler"], +} + +art_cc_library { + name: "libart-compiler-gtest", + defaults: ["libart-gtest-defaults"], + srcs: [ + "common_compiler_test.cc", + ], + shared_libs: [ + "libartd-compiler", + "libartd-disassembler", + "libartbase-art-gtest", + "libart-runtime-gtest", + "libbase", + ], +} + +art_cc_test { + name: "art_compiler_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: [ + "debug/dwarf/dwarf_test.cc", + "debug/src_map_elem_test.cc", + "driver/compiled_method_storage_test.cc", + "exception_test.cc", + "jni/jni_compiler_test.cc", + "linker/linker_patch_test.cc", + "linker/output_stream_test.cc", + "optimizing/bounds_check_elimination_test.cc", + "optimizing/superblock_cloner_test.cc", + "optimizing/data_type_test.cc", + "optimizing/dominator_test.cc", + "optimizing/find_loops_test.cc", + "optimizing/graph_checker_test.cc", + "optimizing/graph_test.cc", + "optimizing/gvn_test.cc", + "optimizing/induction_var_analysis_test.cc", + "optimizing/induction_var_range_test.cc", + "optimizing/licm_test.cc", + "optimizing/live_interval_test.cc", + "optimizing/loop_optimization_test.cc", + "optimizing/nodes_test.cc", + "optimizing/nodes_vector_test.cc", + "optimizing/parallel_move_test.cc", + "optimizing/pretty_printer_test.cc", + "optimizing/reference_type_propagation_test.cc", + "optimizing/select_generator_test.cc", + "optimizing/side_effects_test.cc", + "optimizing/ssa_liveness_analysis_test.cc", + "optimizing/ssa_test.cc", + "optimizing/stack_map_test.cc", + "optimizing/suspend_check_test.cc", + "utils/atomic_dex_ref_map_test.cc", + "utils/dedupe_set_test.cc", + "utils/swap_space_test.cc", + + 
"jni/jni_cfi_test.cc", + "optimizing/codegen_test.cc", + "optimizing/load_store_analysis_test.cc", + "optimizing/load_store_elimination_test.cc", + "optimizing/optimizing_cfi_test.cc", + "optimizing/scheduler_test.cc", + ], + + codegen: { + arm: { + srcs: [ + "utils/arm/managed_register_arm_test.cc", + ], + }, + arm64: { + srcs: [ + "utils/arm64/managed_register_arm64_test.cc", + ], + }, + x86: { + srcs: [ + "utils/x86/managed_register_x86_test.cc", + + // These tests are testing architecture-independent + // functionality, but happen to use x86 codegen as part of the + // test. + "optimizing/constant_folding_test.cc", + "optimizing/dead_code_elimination_test.cc", + "optimizing/linearize_test.cc", + "optimizing/live_ranges_test.cc", + "optimizing/liveness_test.cc", + "optimizing/register_allocator_test.cc", + ], + }, + x86_64: { + srcs: [ + // Is this test a bit-rotten copy of the x86 test? b/77951326 + // "utils/x86_64/managed_register_x86_64_test.cc", + ], + }, + }, + + header_libs: [ + "libart_simulator_headers", + "libnativehelper_header_only", + ], + + shared_libs: [ + "libprofiled", + "libartd-compiler", + "libartd-simulator-container", + "libvixld", + + "libbacktrace", + "libnativeloader", + ], + + target: { + host: { + shared_libs: [ + "libartd-simulator", + ], + }, + }, +} + +art_cc_test { + name: "art_compiler_host_tests", + device_supported: false, + defaults: [ + "art_gtest_defaults", + ], + codegen: { + arm: { + srcs: [ + "utils/assembler_thumb_test.cc", + ], + }, + x86: { + srcs: [ + "utils/x86/assembler_x86_test.cc", + ], + }, + x86_64: { + srcs: [ + "utils/x86_64/assembler_x86_64_test.cc", + ], + }, + }, + shared_libs: [ + "libartd-compiler", + "libvixld", + ], +} diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h new file mode 100644 index 0000000..9755ef1 --- /dev/null +++ b/compiler/cfi_test.h @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + 
* you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_CFI_TEST_H_ +#define ART_COMPILER_CFI_TEST_H_ + +#include +#include +#include + +#include "arch/instruction_set.h" +#include "base/enums.h" +#include "debug/dwarf/dwarf_test.h" +#include "disassembler.h" +#include "dwarf/dwarf_constants.h" +#include "dwarf/headers.h" +#include "gtest/gtest.h" +#include "thread.h" + +namespace art { + +class CFITest : public dwarf::DwarfTest { + public: + void GenerateExpected(FILE* f, InstructionSet isa, const char* isa_str, + ArrayRef actual_asm, + ArrayRef actual_cfi) { + std::vector lines; + // Print the raw bytes. + fprintf(f, "static constexpr uint8_t expected_asm_%s[] = {", isa_str); + HexDump(f, actual_asm); + fprintf(f, "\n};\n"); + fprintf(f, "static constexpr uint8_t expected_cfi_%s[] = {", isa_str); + HexDump(f, actual_cfi); + fprintf(f, "\n};\n"); + // Pretty-print CFI opcodes. + constexpr bool is64bit = false; + dwarf::DebugFrameOpCodeWriter<> initial_opcodes; + dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, &debug_frame_data_); + std::vector debug_frame_patches; + dwarf::WriteFDE(is64bit, + /* cie_pointer= */ 0, + /* code_address= */ 0, + actual_asm.size(), + actual_cfi, + &debug_frame_data_); + ReformatCfi(Objdump(false, "-W"), &lines); + // Pretty-print assembly. + const uint8_t* asm_base = actual_asm.data(); + const uint8_t* asm_end = asm_base + actual_asm.size(); + auto* opts = new DisassemblerOptions(false, + asm_base, + asm_end, + true, + is64bit + ? 
&Thread::DumpThreadOffset + : &Thread::DumpThreadOffset); + std::unique_ptr disasm(Disassembler::Create(isa, opts)); + std::stringstream stream; + const uint8_t* base = actual_asm.data() + (isa == InstructionSet::kThumb2 ? 1 : 0); + disasm->Dump(stream, base, base + actual_asm.size()); + ReformatAsm(&stream, &lines); + // Print CFI and assembly interleaved. + std::stable_sort(lines.begin(), lines.end(), CompareByAddress); + for (const std::string& line : lines) { + fprintf(f, "// %s\n", line.c_str()); + } + fprintf(f, "\n"); + } + + private: + // Helper - get offset just past the end of given string. + static size_t FindEndOf(const std::string& str, const char* substr) { + size_t pos = str.find(substr); + CHECK_NE(std::string::npos, pos); + return pos + strlen(substr); + } + + // Spit to lines and remove raw instruction bytes. + static void ReformatAsm(std::stringstream* stream, + std::vector* output) { + std::string line; + while (std::getline(*stream, line)) { + line = line.substr(0, FindEndOf(line, ": ")) + + line.substr(FindEndOf(line, "\t")); + size_t pos; + while ((pos = line.find(" ")) != std::string::npos) { + line = line.replace(pos, 2, " "); + } + while (!line.empty() && line.back() == ' ') { + line.pop_back(); + } + output->push_back(line); + } + } + + // Find interesting parts of objdump output and prefix the lines with address. + static void ReformatCfi(const std::vector& lines, + std::vector* output) { + std::string address; + for (const std::string& line : lines) { + if (line.find("DW_CFA_nop") != std::string::npos) { + // Ignore. + } else if (line.find("DW_CFA_advance_loc") != std::string::npos) { + // The last 8 characters are the address. + address = "0x" + line.substr(line.size() - 8); + } else if (line.find("DW_CFA_") != std::string::npos) { + std::string new_line(line); + // "bad register" warning is caused by always using host (x86) objdump. 
+ const char* bad_reg = "bad register: "; + size_t pos; + if ((pos = new_line.find(bad_reg)) != std::string::npos) { + new_line = new_line.replace(pos, strlen(bad_reg), ""); + } + // Remove register names in parentheses since they have x86 names. + if ((pos = new_line.find(" (")) != std::string::npos) { + new_line = new_line.replace(pos, FindEndOf(new_line, ")") - pos, ""); + } + // Use the .cfi_ prefix. + new_line = ".cfi_" + new_line.substr(FindEndOf(new_line, "DW_CFA_")); + output->push_back(address + ": " + new_line); + } + } + } + + // Compare strings by the address prefix. + static bool CompareByAddress(const std::string& lhs, const std::string& rhs) { + EXPECT_EQ(lhs[10], ':'); + EXPECT_EQ(rhs[10], ':'); + return strncmp(lhs.c_str(), rhs.c_str(), 10) < 0; + } + + // Pretty-print byte array. 12 bytes per line. + static void HexDump(FILE* f, ArrayRef data) { + for (size_t i = 0; i < data.size(); i++) { + fprintf(f, i % 12 == 0 ? "\n " : " "); // Whitespace. + fprintf(f, "0x%02X,", data[i]); + } + } +}; + +} // namespace art + +#endif // ART_COMPILER_CFI_TEST_H_ diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc new file mode 100644 index 0000000..aec6646 --- /dev/null +++ b/compiler/common_compiler_test.cc @@ -0,0 +1,251 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common_compiler_test.h" + +#include + +#include "arch/instruction_set_features.h" +#include "art_field-inl.h" +#include "art_method-inl.h" +#include "base/callee_save_type.h" +#include "base/casts.h" +#include "base/enums.h" +#include "base/utils.h" +#include "class_linker.h" +#include "compiled_method-inl.h" +#include "dex/descriptors_names.h" +#include "dex/verification_results.h" +#include "driver/compiled_method_storage.h" +#include "driver/compiler_options.h" +#include "jni/java_vm_ext.h" +#include "interpreter/interpreter.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache.h" +#include "mirror/object-inl.h" +#include "oat_quick_method_header.h" +#include "scoped_thread_state_change-inl.h" +#include "thread-current-inl.h" +#include "utils/atomic_dex_ref_map-inl.h" + +namespace art { + +CommonCompilerTest::CommonCompilerTest() {} +CommonCompilerTest::~CommonCompilerTest() {} + +void CommonCompilerTest::MakeExecutable(ArtMethod* method, const CompiledMethod* compiled_method) { + CHECK(method != nullptr); + // If the code size is 0 it means the method was skipped due to profile guided compilation. + if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0u) { + ArrayRef code = compiled_method->GetQuickCode(); + const uint32_t code_size = code.size(); + ArrayRef vmap_table = compiled_method->GetVmapTable(); + const uint32_t vmap_table_offset = vmap_table.empty() ? 
0u + : sizeof(OatQuickMethodHeader) + vmap_table.size(); + OatQuickMethodHeader method_header(vmap_table_offset, code_size); + + header_code_and_maps_chunks_.push_back(std::vector()); + std::vector* chunk = &header_code_and_maps_chunks_.back(); + const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet()); + const size_t size = vmap_table.size() + sizeof(method_header) + code_size; + chunk->reserve(size + max_padding); + chunk->resize(sizeof(method_header)); + static_assert(std::is_trivially_copyable::value, "Cannot use memcpy"); + memcpy(&(*chunk)[0], &method_header, sizeof(method_header)); + chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end()); + chunk->insert(chunk->end(), code.begin(), code.end()); + CHECK_EQ(chunk->size(), size); + const void* unaligned_code_ptr = chunk->data() + (size - code_size); + size_t offset = dchecked_integral_cast(reinterpret_cast(unaligned_code_ptr)); + size_t padding = compiled_method->AlignCode(offset) - offset; + // Make sure no resizing takes place. + CHECK_GE(chunk->capacity(), chunk->size() + padding); + chunk->insert(chunk->begin(), padding, 0); + const void* code_ptr = reinterpret_cast(unaligned_code_ptr) + padding; + CHECK_EQ(code_ptr, static_cast(chunk->data() + (chunk->size() - code_size))); + MakeExecutable(code_ptr, code.size()); + const void* method_code = CompiledMethod::CodePointer(code_ptr, + compiled_method->GetInstructionSet()); + LOG(INFO) << "MakeExecutable " << method->PrettyMethod() << " code=" << method_code; + method->SetEntryPointFromQuickCompiledCode(method_code); + } else { + // No code? You must mean to go into the interpreter. + // Or the generic JNI... 
+ class_linker_->SetEntryPointsToInterpreter(method); + } +} + +void CommonCompilerTest::MakeExecutable(const void* code_start, size_t code_length) { + CHECK(code_start != nullptr); + CHECK_NE(code_length, 0U); + uintptr_t data = reinterpret_cast(code_start); + uintptr_t base = RoundDown(data, kPageSize); + uintptr_t limit = RoundUp(data + code_length, kPageSize); + uintptr_t len = limit - base; + // Remove hwasan tag. This is done in kernel in newer versions. This supports older kernels. + void* base_ptr = HWASanUntag(reinterpret_cast(base)); + int result = mprotect(base_ptr, len, PROT_READ | PROT_WRITE | PROT_EXEC); + CHECK_EQ(result, 0); + + CHECK(FlushCpuCaches(reinterpret_cast(base), reinterpret_cast(base + len))); +} + +void CommonCompilerTest::SetUp() { + CommonRuntimeTest::SetUp(); + { + ScopedObjectAccess soa(Thread::Current()); + + runtime_->SetInstructionSet(instruction_set_); + for (uint32_t i = 0; i < static_cast(CalleeSaveType::kLastCalleeSaveType); ++i) { + CalleeSaveType type = CalleeSaveType(i); + if (!runtime_->HasCalleeSaveMethod(type)) { + runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type); + } + } + } +} + +void CommonCompilerTest::ApplyInstructionSet() { + // Copy local instruction_set_ and instruction_set_features_ to *compiler_options_; + CHECK(instruction_set_features_ != nullptr); + if (instruction_set_ == InstructionSet::kThumb2) { + CHECK_EQ(InstructionSet::kArm, instruction_set_features_->GetInstructionSet()); + } else { + CHECK_EQ(instruction_set_, instruction_set_features_->GetInstructionSet()); + } + compiler_options_->instruction_set_ = instruction_set_; + compiler_options_->instruction_set_features_ = + InstructionSetFeatures::FromBitmap(instruction_set_, instruction_set_features_->AsBitmap()); + CHECK(compiler_options_->instruction_set_features_->Equals(instruction_set_features_.get())); +} + +void CommonCompilerTest::OverrideInstructionSetFeatures(InstructionSet instruction_set, + const std::string& variant) 
{ + instruction_set_ = instruction_set; + std::string error_msg; + instruction_set_features_ = + InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg); + CHECK(instruction_set_features_ != nullptr) << error_msg; + + if (compiler_options_ != nullptr) { + ApplyInstructionSet(); + } +} + +void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) { + CommonRuntimeTest::SetUpRuntimeOptions(options); + + compiler_options_.reset(new CompilerOptions); + verification_results_.reset(new VerificationResults(compiler_options_.get())); + + ApplyInstructionSet(); +} + +Compiler::Kind CommonCompilerTest::GetCompilerKind() const { + return compiler_kind_; +} + +void CommonCompilerTest::SetCompilerKind(Compiler::Kind compiler_kind) { + compiler_kind_ = compiler_kind; +} + +void CommonCompilerTest::TearDown() { + verification_results_.reset(); + compiler_options_.reset(); + + CommonRuntimeTest::TearDown(); +} + +void CommonCompilerTest::CompileMethod(ArtMethod* method) { + CHECK(method != nullptr); + TimingLogger timings("CommonCompilerTest::CompileMethod", false, false); + TimingLogger::ScopedTiming t(__FUNCTION__, &timings); + CompiledMethodStorage storage(/*swap_fd=*/ -1); + CompiledMethod* compiled_method = nullptr; + { + DCHECK(!Runtime::Current()->IsStarted()); + Thread* self = Thread::Current(); + StackHandleScope<2> hs(self); + std::unique_ptr compiler( + Compiler::Create(*compiler_options_, &storage, compiler_kind_)); + const DexFile& dex_file = *method->GetDexFile(); + Handle dex_cache = hs.NewHandle(class_linker_->FindDexCache(self, dex_file)); + Handle class_loader = hs.NewHandle(method->GetClassLoader()); + compiler_options_->verification_results_ = verification_results_.get(); + if (method->IsNative()) { + compiled_method = compiler->JniCompile(method->GetAccessFlags(), + method->GetDexMethodIndex(), + dex_file, + dex_cache); + } else { + verification_results_->AddDexFile(&dex_file); + verification_results_->CreateVerifiedMethodFor( + 
MethodReference(&dex_file, method->GetDexMethodIndex())); + compiled_method = compiler->Compile(method->GetCodeItem(), + method->GetAccessFlags(), + method->GetInvokeType(), + method->GetClassDefIndex(), + method->GetDexMethodIndex(), + class_loader, + dex_file, + dex_cache); + } + compiler_options_->verification_results_ = nullptr; + } + CHECK(method != nullptr); + { + TimingLogger::ScopedTiming t2("MakeExecutable", &timings); + MakeExecutable(method, compiled_method); + } + CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&storage, compiled_method); +} + +void CommonCompilerTest::CompileDirectMethod(Handle class_loader, + const char* class_name, const char* method_name, + const char* signature) { + std::string class_descriptor(DotToDescriptor(class_name)); + Thread* self = Thread::Current(); + ObjPtr klass = + class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); + CHECK(klass != nullptr) << "Class not found " << class_name; + auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = klass->FindClassMethod(method_name, signature, pointer_size); + CHECK(method != nullptr && method->IsDirect()) << "Direct method not found: " + << class_name << "." << method_name << signature; + CompileMethod(method); +} + +void CommonCompilerTest::CompileVirtualMethod(Handle class_loader, + const char* class_name, const char* method_name, + const char* signature) { + std::string class_descriptor(DotToDescriptor(class_name)); + Thread* self = Thread::Current(); + ObjPtr klass = + class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); + CHECK(klass != nullptr) << "Class not found " << class_name; + auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = klass->FindClassMethod(method_name, signature, pointer_size); + CHECK(method != nullptr && !method->IsDirect()) << "Virtual method not found: " + << class_name << "." 
<< method_name << signature; + CompileMethod(method); +} + +void CommonCompilerTest::ClearBootImageOption() { + compiler_options_->image_type_ = CompilerOptions::ImageType::kNone; +} + +} // namespace art diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h new file mode 100644 index 0000000..4f4e49a --- /dev/null +++ b/compiler/common_compiler_test.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_COMMON_COMPILER_TEST_H_ +#define ART_COMPILER_COMMON_COMPILER_TEST_H_ + +#include +#include + +#include + +#include "arch/instruction_set.h" +#include "arch/instruction_set_features.h" +#include "common_runtime_test.h" +#include "compiler.h" +#include "oat_file.h" + +namespace art { +namespace mirror { +class ClassLoader; +} // namespace mirror + +class CompiledMethod; +class CompilerOptions; +class CumulativeLogger; +class DexFile; +class TimingLogger; +class VerificationResults; + +template class Handle; + +class CommonCompilerTest : public CommonRuntimeTest { + public: + CommonCompilerTest(); + ~CommonCompilerTest(); + + void MakeExecutable(ArtMethod* method, const CompiledMethod* compiled_method) + REQUIRES_SHARED(Locks::mutator_lock_); + + static void MakeExecutable(const void* code_start, size_t code_length); + + protected: + void SetUp() override; + + void SetUpRuntimeOptions(RuntimeOptions* options) override; + + Compiler::Kind GetCompilerKind() const; + void SetCompilerKind(Compiler::Kind compiler_kind); + + virtual CompilerFilter::Filter GetCompilerFilter() const { + return CompilerFilter::kDefaultCompilerFilter; + } + + void TearDown() override; + + void CompileMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); + + void CompileDirectMethod(Handle class_loader, const char* class_name, + const char* method_name, const char* signature) + REQUIRES_SHARED(Locks::mutator_lock_); + + void CompileVirtualMethod(Handle class_loader, const char* class_name, + const char* method_name, const char* signature) + REQUIRES_SHARED(Locks::mutator_lock_); + + void ApplyInstructionSet(); + void OverrideInstructionSetFeatures(InstructionSet instruction_set, const std::string& variant); + + void ClearBootImageOption(); + + Compiler::Kind compiler_kind_ = Compiler::kOptimizing; + + InstructionSet instruction_set_ = + (kRuntimeISA == InstructionSet::kArm) ? 
InstructionSet::kThumb2 : kRuntimeISA; + // Take the default set of instruction features from the build. + std::unique_ptr instruction_set_features_ + = InstructionSetFeatures::FromCppDefines(); + + std::unique_ptr compiler_options_; + std::unique_ptr verification_results_; + + private: + // Chunks must not move their storage after being created - use the node-based std::list. + std::list> header_code_and_maps_chunks_; +}; + +} // namespace art + +#endif // ART_COMPILER_COMMON_COMPILER_TEST_H_ diff --git a/compiler/compiled_method-inl.h b/compiler/compiled_method-inl.h new file mode 100644 index 0000000..e60b30f --- /dev/null +++ b/compiler/compiled_method-inl.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_COMPILED_METHOD_INL_H_ +#define ART_COMPILER_COMPILED_METHOD_INL_H_ + +#include "compiled_method.h" + +#include "base/array_ref.h" +#include "base/length_prefixed_array.h" +#include "linker/linker_patch.h" + +namespace art { + +inline ArrayRef CompiledCode::GetQuickCode() const { + return GetArray(quick_code_); +} + +template +inline ArrayRef CompiledCode::GetArray(const LengthPrefixedArray* array) { + if (array == nullptr) { + return ArrayRef(); + } + DCHECK_NE(array->size(), 0u); + return ArrayRef(&array->At(0), array->size()); +} + +inline ArrayRef CompiledMethod::GetVmapTable() const { + return GetArray(vmap_table_); +} + +inline ArrayRef CompiledMethod::GetCFIInfo() const { + return GetArray(cfi_info_); +} + +inline ArrayRef CompiledMethod::GetPatches() const { + return GetArray(patches_); +} + +} // namespace art + +#endif // ART_COMPILER_COMPILED_METHOD_INL_H_ diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc new file mode 100644 index 0000000..03b87ef --- /dev/null +++ b/compiler/compiled_method.cc @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "compiled_method.h" + +#include "driver/compiled_method_storage.h" +#include "utils/swap_space.h" + +namespace art { + +CompiledCode::CompiledCode(CompiledMethodStorage* storage, + InstructionSet instruction_set, + const ArrayRef& quick_code) + : storage_(storage), + quick_code_(storage->DeduplicateCode(quick_code)), + packed_fields_(InstructionSetField::Encode(instruction_set)) { +} + +CompiledCode::~CompiledCode() { + GetStorage()->ReleaseCode(quick_code_); +} + +bool CompiledCode::operator==(const CompiledCode& rhs) const { + if (quick_code_ != nullptr) { + if (rhs.quick_code_ == nullptr) { + return false; + } else if (quick_code_->size() != rhs.quick_code_->size()) { + return false; + } else { + return std::equal(quick_code_->begin(), quick_code_->end(), rhs.quick_code_->begin()); + } + } + return (rhs.quick_code_ == nullptr); +} + +size_t CompiledCode::AlignCode(size_t offset) const { + return AlignCode(offset, GetInstructionSet()); +} + +size_t CompiledCode::AlignCode(size_t offset, InstructionSet instruction_set) { + return RoundUp(offset, GetInstructionSetAlignment(instruction_set)); +} + +size_t CompiledCode::CodeDelta() const { + return CodeDelta(GetInstructionSet()); +} + +size_t CompiledCode::CodeDelta(InstructionSet instruction_set) { + switch (instruction_set) { + case InstructionSet::kArm: + case InstructionSet::kArm64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: + return 0; + case InstructionSet::kThumb2: { + // +1 to set the low-order bit so a BLX will switch to Thumb mode + return 1; + } + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; + UNREACHABLE(); + } +} + +const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet instruction_set) { + switch (instruction_set) { + case InstructionSet::kArm: + case InstructionSet::kArm64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: + return code_pointer; + case InstructionSet::kThumb2: { + uintptr_t address = 
reinterpret_cast(code_pointer); + // Set the low-order bit so a BLX will switch to Thumb mode + address |= 0x1; + return reinterpret_cast(address); + } + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; + UNREACHABLE(); + } +} + +CompiledMethod::CompiledMethod(CompiledMethodStorage* storage, + InstructionSet instruction_set, + const ArrayRef& quick_code, + const ArrayRef& vmap_table, + const ArrayRef& cfi_info, + const ArrayRef& patches) + : CompiledCode(storage, instruction_set, quick_code), + vmap_table_(storage->DeduplicateVMapTable(vmap_table)), + cfi_info_(storage->DeduplicateCFIInfo(cfi_info)), + patches_(storage->DeduplicateLinkerPatches(patches)) { +} + +CompiledMethod* CompiledMethod::SwapAllocCompiledMethod( + CompiledMethodStorage* storage, + InstructionSet instruction_set, + const ArrayRef& quick_code, + const ArrayRef& vmap_table, + const ArrayRef& cfi_info, + const ArrayRef& patches) { + SwapAllocator alloc(storage->GetSwapSpaceAllocator()); + CompiledMethod* ret = alloc.allocate(1); + alloc.construct(ret, + storage, + instruction_set, + quick_code, + vmap_table, + cfi_info, patches); + return ret; +} + +void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompiledMethodStorage* storage, + CompiledMethod* m) { + SwapAllocator alloc(storage->GetSwapSpaceAllocator()); + alloc.destroy(m); + alloc.deallocate(m, 1); +} + +CompiledMethod::~CompiledMethod() { + CompiledMethodStorage* storage = GetStorage(); + storage->ReleaseLinkerPatches(patches_); + storage->ReleaseCFIInfo(cfi_info_); + storage->ReleaseVMapTable(vmap_table_); +} + +} // namespace art diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h new file mode 100644 index 0000000..e92777f --- /dev/null +++ b/compiler/compiled_method.h @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_COMPILED_METHOD_H_ +#define ART_COMPILER_COMPILED_METHOD_H_ + +#include +#include +#include + +#include "arch/instruction_set.h" +#include "base/bit_field.h" +#include "base/bit_utils.h" + +namespace art { + +template class ArrayRef; +class CompiledMethodStorage; +template class LengthPrefixedArray; + +namespace linker { +class LinkerPatch; +} // namespace linker + +class CompiledCode { + public: + // For Quick to supply an code blob + CompiledCode(CompiledMethodStorage* storage, + InstructionSet instruction_set, + const ArrayRef& quick_code); + + virtual ~CompiledCode(); + + InstructionSet GetInstructionSet() const { + return GetPackedField(); + } + + ArrayRef GetQuickCode() const; + + bool operator==(const CompiledCode& rhs) const; + + // To align an offset from a page-aligned value to make it suitable + // for code storage. For example on ARM, to ensure that PC relative + // valu computations work out as expected. + size_t AlignCode(size_t offset) const; + static size_t AlignCode(size_t offset, InstructionSet instruction_set); + + // returns the difference between the code address and a usable PC. + // mainly to cope with kThumb2 where the lower bit must be set. + size_t CodeDelta() const; + static size_t CodeDelta(InstructionSet instruction_set); + + // Returns a pointer suitable for invoking the code at the argument + // code_pointer address. Mainly to cope with kThumb2 where the + // lower bit must be set to indicate Thumb mode. 
+ static const void* CodePointer(const void* code_pointer, InstructionSet instruction_set); + + protected: + static constexpr size_t kInstructionSetFieldSize = + MinimumBitsToStore(static_cast(InstructionSet::kLast)); + static constexpr size_t kNumberOfCompiledCodePackedBits = kInstructionSetFieldSize; + static constexpr size_t kMaxNumberOfPackedBits = sizeof(uint32_t) * kBitsPerByte; + + template + static ArrayRef GetArray(const LengthPrefixedArray* array); + + CompiledMethodStorage* GetStorage() { + return storage_; + } + + template + typename BitFieldType::value_type GetPackedField() const { + return BitFieldType::Decode(packed_fields_); + } + + template + void SetPackedField(typename BitFieldType::value_type value) { + DCHECK(IsUint(static_cast(value))); + packed_fields_ = BitFieldType::Update(value, packed_fields_); + } + + private: + using InstructionSetField = BitField; + + CompiledMethodStorage* const storage_; + + // Used to store the compiled code. + const LengthPrefixedArray* const quick_code_; + + uint32_t packed_fields_; +}; + +class CompiledMethod final : public CompiledCode { + public: + // Constructs a CompiledMethod. + // Note: Consider using the static allocation methods below that will allocate the CompiledMethod + // in the swap space. + CompiledMethod(CompiledMethodStorage* storage, + InstructionSet instruction_set, + const ArrayRef& quick_code, + const ArrayRef& vmap_table, + const ArrayRef& cfi_info, + const ArrayRef& patches); + + virtual ~CompiledMethod(); + + static CompiledMethod* SwapAllocCompiledMethod( + CompiledMethodStorage* storage, + InstructionSet instruction_set, + const ArrayRef& quick_code, + const ArrayRef& vmap_table, + const ArrayRef& cfi_info, + const ArrayRef& patches); + + static void ReleaseSwapAllocatedCompiledMethod(CompiledMethodStorage* storage, CompiledMethod* m); + + bool IsIntrinsic() const { + return GetPackedField(); + } + + // Marks the compiled method as being generated using an intrinsic codegen. 
+ // Such methods have no relationships to their code items. + // This affects debug information generated at link time. + void MarkAsIntrinsic() { + DCHECK(!IsIntrinsic()); + SetPackedField(/* value= */ true); + } + + ArrayRef GetVmapTable() const; + + ArrayRef GetCFIInfo() const; + + ArrayRef GetPatches() const; + + private: + static constexpr size_t kIsIntrinsicLsb = kNumberOfCompiledCodePackedBits; + static constexpr size_t kIsIntrinsicSize = 1u; + static constexpr size_t kNumberOfCompiledMethodPackedBits = kIsIntrinsicLsb + kIsIntrinsicSize; + static_assert(kNumberOfCompiledMethodPackedBits <= CompiledCode::kMaxNumberOfPackedBits, + "Too many packed fields."); + + using IsIntrinsicField = BitField; + + // For quick code, holds code infos which contain stack maps, inline information, and etc. + const LengthPrefixedArray* const vmap_table_; + // For quick code, a FDE entry for the debug_frame section. + const LengthPrefixedArray* const cfi_info_; + // For quick code, linker patches needed by the method. + const LengthPrefixedArray* const patches_; +}; + +} // namespace art + +#endif // ART_COMPILER_COMPILED_METHOD_H_ diff --git a/compiler/compiler.cc b/compiler/compiler.cc new file mode 100644 index 0000000..98d7339 --- /dev/null +++ b/compiler/compiler.cc @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "compiler.h" + +#include + +#include "base/macros.h" +#include "base/utils.h" +#include "dex/code_item_accessors-inl.h" +#include "dex/dex_file.h" +#include "oat.h" +#include "optimizing/optimizing_compiler.h" + +namespace art { + +Compiler* Compiler::Create(const CompilerOptions& compiler_options, + CompiledMethodStorage* storage, + Compiler::Kind kind) { + // Check that oat version when runtime was compiled matches the oat version of the compiler. + constexpr std::array compiler_oat_version = OatHeader::kOatVersion; + OatHeader::CheckOatVersion(compiler_oat_version); + switch (kind) { + case kQuick: + // TODO: Remove Quick in options. + case kOptimizing: + return CreateOptimizingCompiler(compiler_options, storage); + + default: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } +} + +bool Compiler::IsPathologicalCase(const dex::CodeItem& code_item, + uint32_t method_idx, + const DexFile& dex_file) { + /* + * Skip compilation for pathologically large methods - either by instruction count or num vregs. + * Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter + * of that, which also guarantees we cannot overflow our 16-bit internal Quick SSA name space. 
+ */ + CodeItemDataAccessor accessor(dex_file, &code_item); + if (accessor.InsnsSizeInCodeUnits() >= UINT16_MAX / 4) { + LOG(INFO) << "Method exceeds compiler instruction limit: " + << accessor.InsnsSizeInCodeUnits() + << " in " << dex_file.PrettyMethod(method_idx); + return true; + } + if (accessor.RegistersSize() >= UINT16_MAX / 4) { + LOG(INFO) << "Method exceeds compiler virtual register limit: " + << accessor.RegistersSize() << " in " << dex_file.PrettyMethod(method_idx); + return true; + } + return false; +} + +} // namespace art diff --git a/compiler/compiler.h b/compiler/compiler.h new file mode 100644 index 0000000..e363e70 --- /dev/null +++ b/compiler/compiler.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_COMPILER_H_ +#define ART_COMPILER_COMPILER_H_ + +#include "base/mutex.h" +#include "base/os.h" +#include "dex/invoke_type.h" + +namespace art { + +namespace dex { +struct CodeItem; +} // namespace dex +namespace jit { +class JitCodeCache; +class JitLogger; +class JitMemoryRegion; +} // namespace jit +namespace mirror { +class ClassLoader; +class DexCache; +} // namespace mirror + +class ArtMethod; +class CompiledMethod; +class CompiledMethodStorage; +class CompilerOptions; +class DexFile; +template class Handle; +class Thread; + +class Compiler { + public: + enum Kind { + kQuick, + kOptimizing + }; + + static Compiler* Create(const CompilerOptions& compiler_options, + CompiledMethodStorage* storage, + Kind kind); + + virtual bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const = 0; + + virtual CompiledMethod* Compile(const dex::CodeItem* code_item, + uint32_t access_flags, + InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + Handle class_loader, + const DexFile& dex_file, + Handle dex_cache) const = 0; + + virtual CompiledMethod* JniCompile(uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file, + Handle dex_cache) const = 0; + + virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED, + jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED, + jit::JitMemoryRegion* region ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, + bool baseline ATTRIBUTE_UNUSED, + bool osr ATTRIBUTE_UNUSED, + jit::JitLogger* jit_logger ATTRIBUTE_UNUSED) + REQUIRES_SHARED(Locks::mutator_lock_) { + return false; + } + + virtual uintptr_t GetEntryPointOf(ArtMethod* method) const + REQUIRES_SHARED(Locks::mutator_lock_) = 0; + + uint64_t GetMaximumCompilationTimeBeforeWarning() const { + return maximum_compilation_time_before_warning_; + } + + virtual ~Compiler() {} + + // Returns whether the method to compile is such a pathological case that + // it's not worth compiling. 
+ static bool IsPathologicalCase(const dex::CodeItem& code_item, + uint32_t method_idx, + const DexFile& dex_file); + + protected: + Compiler(const CompilerOptions& compiler_options, + CompiledMethodStorage* storage, + uint64_t warning) : + compiler_options_(compiler_options), + storage_(storage), + maximum_compilation_time_before_warning_(warning) { + } + + const CompilerOptions& GetCompilerOptions() const { + return compiler_options_; + } + + CompiledMethodStorage* GetCompiledMethodStorage() const { + return storage_; + } + + private: + const CompilerOptions& compiler_options_; + CompiledMethodStorage* const storage_; + const uint64_t maximum_compilation_time_before_warning_; + + DISALLOW_COPY_AND_ASSIGN(Compiler); +}; + +} // namespace art + +#endif // ART_COMPILER_COMPILER_H_ diff --git a/compiler/debug/debug_info.h b/compiler/debug/debug_info.h new file mode 100644 index 0000000..04c6991 --- /dev/null +++ b/compiler/debug/debug_info.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_DEBUG_INFO_H_ +#define ART_COMPILER_DEBUG_DEBUG_INFO_H_ + +#include + +#include "base/array_ref.h" +#include "method_debug_info.h" + +namespace art { +class DexFile; + +namespace debug { + +// References inputs for all debug information which can be written into the ELF file. +struct DebugInfo { + // Describes compiled code in the .text section. 
+ ArrayRef compiled_methods; + + // Describes dex-files in the .dex section. + std::map dex_files; // Offset in section -> dex file content. + + bool Empty() const { + return compiled_methods.empty() && dex_files.empty(); + } +}; + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_DEBUG_INFO_H_ diff --git a/compiler/debug/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc new file mode 100644 index 0000000..5946af8 --- /dev/null +++ b/compiler/debug/dwarf/dwarf_test.cc @@ -0,0 +1,333 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dwarf_test.h" + +#include "dwarf/debug_frame_opcode_writer.h" +#include "dwarf/debug_info_entry_writer.h" +#include "dwarf/debug_line_opcode_writer.h" +#include "dwarf/dwarf_constants.h" +#include "dwarf/headers.h" +#include "gtest/gtest.h" + +namespace art { +namespace dwarf { + +// Run the tests only on host since we need objdump. +#ifndef ART_TARGET_ANDROID + +TEST_F(DwarfTest, DebugFrame) { + const bool is64bit = false; + + // Pick offset value which would catch Uleb vs Sleb errors. + const int offset = 40000; + ASSERT_EQ(UnsignedLeb128Size(offset / 4), 2u); + ASSERT_EQ(SignedLeb128Size(offset / 4), 3u); + DW_CHECK("Data alignment factor: -4"); + const Reg reg(6); + + // Test the opcodes in the order mentioned in the spec. + // There are usually several encoding variations of each opcode. 
+ DebugFrameOpCodeWriter<> opcodes; + DW_CHECK("FDE"); + int pc = 0; + for (int i : {0, 1, 0x3F, 0x40, 0xFF, 0x100, 0xFFFF, 0x10000}) { + pc += i; + opcodes.AdvancePC(pc); + } + DW_CHECK_NEXT("DW_CFA_advance_loc: 1 to 01000001"); + DW_CHECK_NEXT("DW_CFA_advance_loc: 63 to 01000040"); + DW_CHECK_NEXT("DW_CFA_advance_loc1: 64 to 01000080"); + DW_CHECK_NEXT("DW_CFA_advance_loc1: 255 to 0100017f"); + DW_CHECK_NEXT("DW_CFA_advance_loc2: 256 to 0100027f"); + DW_CHECK_NEXT("DW_CFA_advance_loc2: 65535 to 0101027e"); + DW_CHECK_NEXT("DW_CFA_advance_loc4: 65536 to 0102027e"); + opcodes.DefCFA(reg, offset); + DW_CHECK_NEXT("DW_CFA_def_cfa: r6 (esi) ofs 40000"); + opcodes.DefCFA(reg, -offset); + DW_CHECK_NEXT("DW_CFA_def_cfa_sf: r6 (esi) ofs -40000"); + opcodes.DefCFARegister(reg); + DW_CHECK_NEXT("DW_CFA_def_cfa_register: r6 (esi)"); + opcodes.DefCFAOffset(offset); + DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 40000"); + opcodes.DefCFAOffset(-offset); + DW_CHECK_NEXT("DW_CFA_def_cfa_offset_sf: -40000"); + uint8_t expr[] = { 0 }; + opcodes.DefCFAExpression(expr, arraysize(expr)); + DW_CHECK_NEXT("DW_CFA_def_cfa_expression"); + opcodes.Undefined(reg); + DW_CHECK_NEXT("DW_CFA_undefined: r6 (esi)"); + opcodes.SameValue(reg); + DW_CHECK_NEXT("DW_CFA_same_value: r6 (esi)"); + opcodes.Offset(Reg(0x3F), -offset); + DW_CHECK_NEXT("DW_CFA_offset: r63 at cfa-40000"); + opcodes.Offset(Reg(0x40), -offset); + DW_CHECK_NEXT("DW_CFA_offset_extended: r64 at cfa-40000"); + opcodes.Offset(Reg(0x40), offset); + DW_CHECK_NEXT("DW_CFA_offset_extended_sf: r64 at cfa+40000"); + opcodes.ValOffset(reg, -offset); + DW_CHECK_NEXT("DW_CFA_val_offset: r6 (esi) at cfa-40000"); + opcodes.ValOffset(reg, offset); + DW_CHECK_NEXT("DW_CFA_val_offset_sf: r6 (esi) at cfa+40000"); + opcodes.Register(reg, Reg(1)); + DW_CHECK_NEXT("DW_CFA_register: r6 (esi) in r1 (ecx)"); + opcodes.Expression(reg, expr, arraysize(expr)); + DW_CHECK_NEXT("DW_CFA_expression: r6 (esi)"); + opcodes.ValExpression(reg, expr, arraysize(expr)); + 
DW_CHECK_NEXT("DW_CFA_val_expression: r6 (esi)"); + opcodes.Restore(Reg(0x3F)); + DW_CHECK_NEXT("DW_CFA_restore: bad register: r63"); + opcodes.Restore(Reg(0x40)); + DW_CHECK_NEXT("DW_CFA_restore_extended: bad register: r64"); + opcodes.Restore(reg); + DW_CHECK_NEXT("DW_CFA_restore: r6 (esi)"); + opcodes.RememberState(); + DW_CHECK_NEXT("DW_CFA_remember_state"); + opcodes.RestoreState(); + DW_CHECK_NEXT("DW_CFA_restore_state"); + opcodes.Nop(); + DW_CHECK_NEXT("DW_CFA_nop"); + + // Also test helpers. + opcodes.DefCFA(Reg(4), 100); // ESP + DW_CHECK_NEXT("DW_CFA_def_cfa: r4 (esp) ofs 100"); + opcodes.AdjustCFAOffset(8); + DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 108"); + opcodes.RelOffset(Reg(0), 0); // push R0 + DW_CHECK_NEXT("DW_CFA_offset: r0 (eax) at cfa-108"); + opcodes.RelOffset(Reg(1), 4); // push R1 + DW_CHECK_NEXT("DW_CFA_offset: r1 (ecx) at cfa-104"); + opcodes.RelOffsetForMany(Reg(2), 8, 1 | (1 << 3), 4); // push R2 and R5 + DW_CHECK_NEXT("DW_CFA_offset: r2 (edx) at cfa-100"); + DW_CHECK_NEXT("DW_CFA_offset: r5 (ebp) at cfa-96"); + opcodes.RestoreMany(Reg(2), 1 | (1 << 3)); // pop R2 and R5 + DW_CHECK_NEXT("DW_CFA_restore: r2 (edx)"); + DW_CHECK_NEXT("DW_CFA_restore: r5 (ebp)"); + + DebugFrameOpCodeWriter<> initial_opcodes; + WriteCIE(is64bit, Reg(is64bit ? 
16 : 8), initial_opcodes, &debug_frame_data_); + WriteFDE(is64bit, + /* cie_pointer= */ 0, + 0x01000000, + 0x01000000, + ArrayRef(*opcodes.data()), + &debug_frame_data_); + + CheckObjdumpOutput(is64bit, "-W"); +} + +TEST_F(DwarfTest, DISABLED_DebugFrame64) { + constexpr bool is64bit = true; + DebugFrameOpCodeWriter<> initial_opcodes; + WriteCIE(is64bit, Reg(16), initial_opcodes, &debug_frame_data_); + DebugFrameOpCodeWriter<> opcodes; + WriteFDE(is64bit, + /* cie_pointer= */ 0, + 0x0100000000000000, + 0x0200000000000000, + ArrayRef(*opcodes.data()), + &debug_frame_data_); + DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000"); + + CheckObjdumpOutput(is64bit, "-W"); +} + +// Test x86_64 register mapping. It is the only non-trivial architecture. +// ARM, X86, and Mips have: dwarf_reg = art_reg + constant. +TEST_F(DwarfTest, x86_64_RegisterMapping) { + constexpr bool is64bit = true; + DebugFrameOpCodeWriter<> opcodes; + for (int i = 0; i < 16; i++) { + opcodes.RelOffset(Reg::X86_64Core(i), 0); + } + DW_CHECK("FDE"); + DW_CHECK_NEXT("DW_CFA_offset: r0 (rax)"); + DW_CHECK_NEXT("DW_CFA_offset: r2 (rcx)"); + DW_CHECK_NEXT("DW_CFA_offset: r1 (rdx)"); + DW_CHECK_NEXT("DW_CFA_offset: r3 (rbx)"); + DW_CHECK_NEXT("DW_CFA_offset: r7 (rsp)"); + DW_CHECK_NEXT("DW_CFA_offset: r6 (rbp)"); + DW_CHECK_NEXT("DW_CFA_offset: r4 (rsi)"); + DW_CHECK_NEXT("DW_CFA_offset: r5 (rdi)"); + DW_CHECK_NEXT("DW_CFA_offset: r8 (r8)"); + DW_CHECK_NEXT("DW_CFA_offset: r9 (r9)"); + DW_CHECK_NEXT("DW_CFA_offset: r10 (r10)"); + DW_CHECK_NEXT("DW_CFA_offset: r11 (r11)"); + DW_CHECK_NEXT("DW_CFA_offset: r12 (r12)"); + DW_CHECK_NEXT("DW_CFA_offset: r13 (r13)"); + DW_CHECK_NEXT("DW_CFA_offset: r14 (r14)"); + DW_CHECK_NEXT("DW_CFA_offset: r15 (r15)"); + DebugFrameOpCodeWriter<> initial_opcodes; + WriteCIE(is64bit, Reg(16), initial_opcodes, &debug_frame_data_); + WriteFDE(is64bit, + /* cie_pointer= */ 0, + 0x0100000000000000, + 0x0200000000000000, + ArrayRef(*opcodes.data()), + &debug_frame_data_); 
+ + CheckObjdumpOutput(is64bit, "-W"); +} + +TEST_F(DwarfTest, DISABLED_DebugLine) { + const bool is64bit = false; + const int code_factor_bits = 1; + DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits); + + std::vector include_directories; + include_directories.push_back("/path/to/source"); + DW_CHECK("/path/to/source"); + + std::vector files { + { "file0.c", 0, 1000, 2000 }, + { "file1.c", 1, 1000, 2000 }, + { "file2.c", 1, 1000, 2000 }, + }; + DW_CHECK("1\t0\t1000\t2000\tfile0.c"); + DW_CHECK_NEXT("2\t1\t1000\t2000\tfile1.c"); + DW_CHECK_NEXT("3\t1\t1000\t2000\tfile2.c"); + + DW_CHECK("Line Number Statements"); + opcodes.SetAddress(0x01000000); + DW_CHECK_NEXT("Extended opcode 2: set Address to 0x1000000"); + opcodes.AddRow(); + DW_CHECK_NEXT("Copy"); + opcodes.AdvancePC(0x01000100); + DW_CHECK_NEXT("Advance PC by 256 to 0x1000100"); + opcodes.SetFile(2); + DW_CHECK_NEXT("Set File Name to entry 2 in the File Name Table"); + opcodes.AdvanceLine(3); + DW_CHECK_NEXT("Advance Line by 2 to 3"); + opcodes.SetColumn(4); + DW_CHECK_NEXT("Set column to 4"); + opcodes.SetIsStmt(true); + DW_CHECK_NEXT("Set is_stmt to 1"); + opcodes.SetIsStmt(false); + DW_CHECK_NEXT("Set is_stmt to 0"); + opcodes.SetBasicBlock(); + DW_CHECK_NEXT("Set basic block"); + opcodes.SetPrologueEnd(); + DW_CHECK_NEXT("Set prologue_end to true"); + opcodes.SetEpilogueBegin(); + DW_CHECK_NEXT("Set epilogue_begin to true"); + opcodes.SetISA(5); + DW_CHECK_NEXT("Set ISA to 5"); + opcodes.EndSequence(); + DW_CHECK_NEXT("Extended opcode 1: End of Sequence"); + opcodes.DefineFile("file.c", 0, 1000, 2000); + DW_CHECK_NEXT("Extended opcode 3: define new File Table entry"); + DW_CHECK_NEXT("Entry\tDir\tTime\tSize\tName"); + DW_CHECK_NEXT("1\t0\t1000\t2000\tfile.c"); + + WriteDebugLineTable(include_directories, files, opcodes, &debug_line_data_); + + CheckObjdumpOutput(is64bit, "-W"); +} + +// DWARF has special one byte codes which advance PC and line at the same time. 
+TEST_F(DwarfTest, DebugLineSpecialOpcodes) { + const bool is64bit = false; + const int code_factor_bits = 1; + uint32_t pc = 0x01000000; + int line = 1; + DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits); + opcodes.SetAddress(pc); + size_t num_rows = 0; + DW_CHECK("Line Number Statements:"); + DW_CHECK("Special opcode"); + DW_CHECK("Advance PC by constant"); + DW_CHECK("Decoded dump of debug contents of section .debug_line:"); + DW_CHECK("Line number Starting address"); + for (int addr_delta = 0; addr_delta < 80; addr_delta += 2) { + for (int line_delta = 16; line_delta >= -16; --line_delta) { + pc += addr_delta; + line += line_delta; + opcodes.AddRow(pc, line); + num_rows++; + ASSERT_EQ(opcodes.CurrentAddress(), pc); + ASSERT_EQ(opcodes.CurrentLine(), line); + char expected[1024]; + sprintf(expected, "%i 0x%x", line, pc); + DW_CHECK_NEXT(expected); + } + } + EXPECT_LT(opcodes.data()->size(), num_rows * 3); + + std::vector directories; + std::vector files = { { "file.c", 0, 1000, 2000 } }; + WriteDebugLineTable(directories, files, opcodes, &debug_line_data_); + + CheckObjdumpOutput(is64bit, "-W -WL"); +} + +TEST_F(DwarfTest, DebugInfo) { + constexpr bool is64bit = false; + DebugAbbrevWriter<> debug_abbrev(&debug_abbrev_data_); + DebugInfoEntryWriter<> info(is64bit, &debug_abbrev); + DW_CHECK("Contents of the .debug_info section:"); + info.StartTag(dwarf::DW_TAG_compile_unit); + DW_CHECK("Abbrev Number: 1 (DW_TAG_compile_unit)"); + info.WriteStrp(dwarf::DW_AT_producer, "Compiler name", &debug_str_data_); + DW_CHECK_NEXT("DW_AT_producer : (indirect string, offset: 0x0): Compiler name"); + info.WriteAddr(dwarf::DW_AT_low_pc, 0x01000000); + DW_CHECK_NEXT("DW_AT_low_pc : 0x1000000"); + info.WriteAddr(dwarf::DW_AT_high_pc, 0x02000000); + DW_CHECK_NEXT("DW_AT_high_pc : 0x2000000"); + info.StartTag(dwarf::DW_TAG_subprogram); + DW_CHECK("Abbrev Number: 2 (DW_TAG_subprogram)"); + info.WriteStrp(dwarf::DW_AT_name, "Foo", &debug_str_data_); + 
DW_CHECK_NEXT("DW_AT_name : (indirect string, offset: 0xe): Foo"); + info.WriteAddr(dwarf::DW_AT_low_pc, 0x01010000); + DW_CHECK_NEXT("DW_AT_low_pc : 0x1010000"); + info.WriteAddr(dwarf::DW_AT_high_pc, 0x01020000); + DW_CHECK_NEXT("DW_AT_high_pc : 0x1020000"); + info.EndTag(); // DW_TAG_subprogram + info.StartTag(dwarf::DW_TAG_subprogram); + DW_CHECK("Abbrev Number: 2 (DW_TAG_subprogram)"); + info.WriteStrp(dwarf::DW_AT_name, "Bar", &debug_str_data_); + DW_CHECK_NEXT("DW_AT_name : (indirect string, offset: 0x12): Bar"); + info.WriteAddr(dwarf::DW_AT_low_pc, 0x01020000); + DW_CHECK_NEXT("DW_AT_low_pc : 0x1020000"); + info.WriteAddr(dwarf::DW_AT_high_pc, 0x01030000); + DW_CHECK_NEXT("DW_AT_high_pc : 0x1030000"); + info.EndTag(); // DW_TAG_subprogram + info.EndTag(); // DW_TAG_compile_unit + // Test that previous list was properly terminated and empty children. + info.StartTag(dwarf::DW_TAG_compile_unit); + info.EndTag(); // DW_TAG_compile_unit + + // The abbrev table is just side product, but check it as well. 
+ DW_CHECK("Abbrev Number: 3 (DW_TAG_compile_unit)"); + DW_CHECK("Contents of the .debug_abbrev section:"); + DW_CHECK("1 DW_TAG_compile_unit [has children]"); + DW_CHECK_NEXT("DW_AT_producer DW_FORM_strp"); + DW_CHECK_NEXT("DW_AT_low_pc DW_FORM_addr"); + DW_CHECK_NEXT("DW_AT_high_pc DW_FORM_addr"); + DW_CHECK("2 DW_TAG_subprogram [no children]"); + DW_CHECK_NEXT("DW_AT_name DW_FORM_strp"); + DW_CHECK_NEXT("DW_AT_low_pc DW_FORM_addr"); + DW_CHECK_NEXT("DW_AT_high_pc DW_FORM_addr"); + DW_CHECK("3 DW_TAG_compile_unit [no children]"); + + dwarf::WriteDebugInfoCU(/* debug_abbrev_offset= */ 0, info, &debug_info_data_); + + CheckObjdumpOutput(is64bit, "-W"); +} + +#endif // ART_TARGET_ANDROID + +} // namespace dwarf +} // namespace art diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h new file mode 100644 index 0000000..e51f807 --- /dev/null +++ b/compiler/debug/dwarf/dwarf_test.h @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ +#define ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ + +#include +#include +#include + +#include +#include +#include +#include + +#include "base/os.h" +#include "base/unix_file/fd_file.h" +#include "common_compiler_test.h" +#include "elf/elf_builder.h" +#include "gtest/gtest.h" +#include "stream/file_output_stream.h" + +namespace art { +namespace dwarf { + +#define DW_CHECK(substring) Check(substring, false, __FILE__, __LINE__) +#define DW_CHECK_NEXT(substring) Check(substring, true, __FILE__, __LINE__) + +class DwarfTest : public CommonCompilerTest { + public: + static constexpr bool kPrintObjdumpOutput = false; // debugging. + + struct ExpectedLine { + std::string substring; + bool next; + const char* at_file; + int at_line; + }; + + // Check that the objdump output contains given output. + // If next is true, it must be the next line. Otherwise lines are skipped. + void Check(const char* substr, bool next, const char* at_file, int at_line) { + expected_lines_.push_back(ExpectedLine {substr, next, at_file, at_line}); + } + + // Pretty-print the generated DWARF data using objdump. + template + std::vector Objdump(const char* args) { + // Write simple elf file with just the DWARF sections. + InstructionSet isa = + (sizeof(typename ElfTypes::Addr) == 8) ? 
InstructionSet::kX86_64 : InstructionSet::kX86; + ScratchFile file; + FileOutputStream output_stream(file.GetFile()); + ElfBuilder builder(isa, &output_stream); + builder.Start(); + if (!debug_info_data_.empty()) { + builder.WriteSection(".debug_info", &debug_info_data_); + } + if (!debug_abbrev_data_.empty()) { + builder.WriteSection(".debug_abbrev", &debug_abbrev_data_); + } + if (!debug_str_data_.empty()) { + builder.WriteSection(".debug_str", &debug_str_data_); + } + if (!debug_line_data_.empty()) { + builder.WriteSection(".debug_line", &debug_line_data_); + } + if (!debug_frame_data_.empty()) { + builder.WriteSection(".debug_frame", &debug_frame_data_); + } + builder.End(); + EXPECT_TRUE(builder.Good()); + + // Read the elf file back using objdump. + std::vector lines; + std::string cmd = GetAndroidHostToolsDir(); + cmd = cmd + "objdump " + args + " " + file.GetFilename() + " 2>&1"; + FILE* output = popen(cmd.data(), "r"); + char buffer[1024]; + const char* line; + while ((line = fgets(buffer, sizeof(buffer), output)) != nullptr) { + if (kPrintObjdumpOutput) { + printf("%s", line); + } + if (line[0] != '\0' && line[0] != '\n') { + EXPECT_TRUE(strstr(line, "objdump: Error:") == nullptr) << line; + EXPECT_TRUE(strstr(line, "objdump: Warning:") == nullptr) << line; + std::string str(line); + if (str.back() == '\n') { + str.pop_back(); + } + lines.push_back(str); + } + } + pclose(output); + return lines; + } + + std::vector Objdump(bool is64bit, const char* args) { + if (is64bit) { + return Objdump(args); + } else { + return Objdump(args); + } + } + + // Compare objdump output to the recorded checks. 
+ void CheckObjdumpOutput(bool is64bit, const char* args) { + std::vector actual_lines = Objdump(is64bit, args); + auto actual_line = actual_lines.begin(); + for (const ExpectedLine& expected_line : expected_lines_) { + const std::string& substring = expected_line.substring; + if (actual_line == actual_lines.end()) { + ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << + "Expected '" << substring << "'.\n" << + "Seen end of output."; + } else if (expected_line.next) { + if (actual_line->find(substring) == std::string::npos) { + ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << + "Expected '" << substring << "'.\n" << + "Seen '" << actual_line->data() << "'."; + } else { + // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data()); + } + actual_line++; + } else { + bool found = false; + for (auto it = actual_line; it < actual_lines.end(); it++) { + if (it->find(substring) != std::string::npos) { + actual_line = it; + found = true; + break; + } + } + if (!found) { + ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << + "Expected '" << substring << "'.\n" << + "Not found anywhere in the rest of the output."; + } else { + // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data()); + actual_line++; + } + } + } + } + + // Buffers which are going to assembled into ELF file and passed to objdump. + std::vector debug_frame_data_; + std::vector debug_info_data_; + std::vector debug_abbrev_data_; + std::vector debug_str_data_; + std::vector debug_line_data_; + + // The expected output of objdump. 
+ std::vector expected_lines_; +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ diff --git a/compiler/debug/elf_compilation_unit.h b/compiler/debug/elf_compilation_unit.h new file mode 100644 index 0000000..b1d89eb --- /dev/null +++ b/compiler/debug/elf_compilation_unit.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ +#define ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ + +#include + +#include "debug/method_debug_info.h" + +namespace art { +namespace debug { + +struct ElfCompilationUnit { + std::vector methods; + size_t debug_line_offset = 0; + bool is_code_address_text_relative; // Is the address offset from start of .text section? + uint64_t code_address = std::numeric_limits::max(); + uint64_t code_end = 0; +}; + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ + diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h new file mode 100644 index 0000000..f41db07 --- /dev/null +++ b/compiler/debug/elf_debug_frame_writer.h @@ -0,0 +1,237 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_
+
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "debug/method_debug_info.h"
+#include "dwarf/debug_frame_opcode_writer.h"
+#include "dwarf/dwarf_constants.h"
+#include "dwarf/headers.h"
+#include "elf/elf_builder.h"
+
+namespace art {
+namespace debug {
+
+static constexpr bool kWriteDebugFrameHdr = false;
+
+// Binary search table is not useful if the number of entries is small.
+// In particular, this avoids it for the in-memory JIT mini-debug-info.
+static constexpr size_t kMinDebugFrameHdrEntries = 100;
+
+// Write the per-ISA CIE (Common Information Entry) describing the initial CFI state.
+static void WriteCIE(InstructionSet isa, /*inout*/ std::vector<uint8_t>* buffer) {
+  using Reg = dwarf::Reg;
+  // Scratch registers should be marked as undefined. This tells the
+  // debugger that its value in the previous frame is not recoverable.
+  bool is64bit = Is64BitInstructionSet(isa);
+  switch (isa) {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2: {
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::ArmCore(13), 0);  // R13(SP).
+      // core registers.
+      for (int reg = 0; reg < 13; reg++) {
+        if (reg < 4 || reg == 12) {
+          opcodes.Undefined(Reg::ArmCore(reg));
+        } else {
+          opcodes.SameValue(Reg::ArmCore(reg));
+        }
+      }
+      // fp registers.
+      for (int reg = 0; reg < 32; reg++) {
+        if (reg < 16) {
+          opcodes.Undefined(Reg::ArmFp(reg));
+        } else {
+          opcodes.SameValue(Reg::ArmFp(reg));
+        }
+      }
+      auto return_reg = Reg::ArmCore(14);  // R14(LR).
+      WriteCIE(is64bit, return_reg, opcodes, buffer);
+      return;
+    }
+    case InstructionSet::kArm64: {
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::Arm64Core(31), 0);  // R31(SP).
+      // core registers.
+      for (int reg = 0; reg < 30; reg++) {
+        if (reg < 8 || reg == 16 || reg == 17) {
+          opcodes.Undefined(Reg::Arm64Core(reg));
+        } else {
+          opcodes.SameValue(Reg::Arm64Core(reg));
+        }
+      }
+      // fp registers.
+      for (int reg = 0; reg < 32; reg++) {
+        if (reg < 8 || reg >= 16) {
+          opcodes.Undefined(Reg::Arm64Fp(reg));
+        } else {
+          opcodes.SameValue(Reg::Arm64Fp(reg));
+        }
+      }
+      auto return_reg = Reg::Arm64Core(30);  // R30(LR).
+      WriteCIE(is64bit, return_reg, opcodes, buffer);
+      return;
+    }
+    case InstructionSet::kX86: {
+      // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
+      constexpr bool generate_opcodes_for_x86_fp = false;
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::X86Core(4), 4);   // R4(ESP).
+      opcodes.Offset(Reg::X86Core(8), -4);  // R8(EIP).
+      // core registers.
+      for (int reg = 0; reg < 8; reg++) {
+        if (reg <= 3) {
+          opcodes.Undefined(Reg::X86Core(reg));
+        } else if (reg == 4) {
+          // Stack pointer.
+        } else {
+          opcodes.SameValue(Reg::X86Core(reg));
+        }
+      }
+      // fp registers.
+      if (generate_opcodes_for_x86_fp) {
+        for (int reg = 0; reg < 8; reg++) {
+          opcodes.Undefined(Reg::X86Fp(reg));
+        }
+      }
+      auto return_reg = Reg::X86Core(8);  // R8(EIP).
+      WriteCIE(is64bit, return_reg, opcodes, buffer);
+      return;
+    }
+    case InstructionSet::kX86_64: {
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::X86_64Core(4), 8);    // R4(RSP).
+      opcodes.Offset(Reg::X86_64Core(16), -8);  // R16(RIP).
+      // core registers.
+      for (int reg = 0; reg < 16; reg++) {
+        if (reg == 4) {
+          // Stack pointer.
+        } else if (reg < 12 && reg != 3 && reg != 5) {  // except EBX and EBP.
+          opcodes.Undefined(Reg::X86_64Core(reg));
+        } else {
+          opcodes.SameValue(Reg::X86_64Core(reg));
+        }
+      }
+      // fp registers.
+      for (int reg = 0; reg < 16; reg++) {
+        if (reg < 12) {
+          opcodes.Undefined(Reg::X86_64Fp(reg));
+        } else {
+          opcodes.SameValue(Reg::X86_64Fp(reg));
+        }
+      }
+      auto return_reg = Reg::X86_64Core(16);  // R16(RIP).
+      WriteCIE(is64bit, return_reg, opcodes, buffer);
+      return;
+    }
+    case InstructionSet::kNone:
+      break;
+  }
+  LOG(FATAL) << "Cannot write CIE frame for ISA " << isa;
+  UNREACHABLE();
+}
+
+// Write the .debug_frame section (CIE + one FDE per method), and optionally the
+// custom Android .debug_frame_hdr binary-search table (currently compiled out).
+template <typename ElfTypes>
+void WriteCFISection(ElfBuilder<ElfTypes>* builder,
+                     const ArrayRef<const MethodDebugInfo>& method_infos) {
+  typedef typename ElfTypes::Addr Elf_Addr;
+
+  // The methods can be written in any order.
+  // Let's therefore sort them in the lexicographical order of the opcodes.
+  // This has no effect on its own. However, if the final .debug_frame section is
+  // compressed it reduces the size since similar opcodes sequences are grouped.
+  std::vector<const MethodDebugInfo*> sorted_method_infos;
+  sorted_method_infos.reserve(method_infos.size());
+  for (size_t i = 0; i < method_infos.size(); i++) {
+    if (!method_infos[i].cfi.empty() && !method_infos[i].deduped) {
+      sorted_method_infos.push_back(&method_infos[i]);
+    }
+  }
+  if (sorted_method_infos.empty()) {
+    return;
+  }
+  std::stable_sort(
+      sorted_method_infos.begin(),
+      sorted_method_infos.end(),
+      [](const MethodDebugInfo* lhs, const MethodDebugInfo* rhs) {
+        ArrayRef<const uint8_t> l = lhs->cfi;
+        ArrayRef<const uint8_t> r = rhs->cfi;
+        return std::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end());
+      });
+
+  std::vector<uint32_t> binary_search_table;
+  if (kWriteDebugFrameHdr) {
+    binary_search_table.reserve(2 * sorted_method_infos.size());
+  }
+
+  // Write .debug_frame section.
+  auto* cfi_section = builder->GetDebugFrame();
+  {
+    cfi_section->Start();
+    const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
+    std::vector<uint8_t> buffer;  // Small temporary buffer.
+    WriteCIE(builder->GetIsa(), &buffer);
+    cfi_section->WriteFully(buffer.data(), buffer.size());
+    buffer.clear();
+    for (const MethodDebugInfo* mi : sorted_method_infos) {
+      DCHECK(!mi->deduped);
+      DCHECK(!mi->cfi.empty());
+      const Elf_Addr code_address = mi->code_address +
+          (mi->is_code_address_text_relative ? builder->GetText()->GetAddress() : 0);
+      if (kWriteDebugFrameHdr) {
+        binary_search_table.push_back(dchecked_integral_cast<uint32_t>(code_address));
+        binary_search_table.push_back(cfi_section->GetPosition());
+      }
+      dwarf::WriteFDE(is64bit,
+                      /* cie_pointer= */ 0,
+                      code_address,
+                      mi->code_size,
+                      mi->cfi,
+                      &buffer);
+      cfi_section->WriteFully(buffer.data(), buffer.size());
+      buffer.clear();
+    }
+    cfi_section->End();
+  }
+
+  if (kWriteDebugFrameHdr && method_infos.size() > kMinDebugFrameHdrEntries) {
+    std::sort(binary_search_table.begin(), binary_search_table.end());
+
+    // Custom Android section. It is very similar to the official .eh_frame_hdr format.
+    std::vector<uint8_t> header_buffer;
+    dwarf::Writer<> header(&header_buffer);
+    header.PushUint8(1);  // Version.
+    header.PushUint8(dwarf::DW_EH_PE_omit);    // Encoding of .eh_frame pointer - none.
+    header.PushUint8(dwarf::DW_EH_PE_udata4);  // Encoding of binary search table size.
+    header.PushUint8(dwarf::DW_EH_PE_udata4);  // Encoding of binary search table data.
+    header.PushUint32(dchecked_integral_cast<uint32_t>(binary_search_table.size()/2));
+
+    auto* header_section = builder->GetDebugFrameHdr();
+    header_section->Start();
+    header_section->WriteFully(header_buffer.data(), header_buffer.size());
+    header_section->WriteFully(binary_search_table.data(), binary_search_table.size());  // NOTE(review): size() is an element count, not bytes — presumably should be size() * sizeof(uint32_t); dead code while kWriteDebugFrameHdr is false. TODO confirm against upstream.
+    header_section->End();
+  }
+}
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_
+
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
new file mode 100644
index 0000000..986c7e8
--- /dev/null
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -0,0 +1,678 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+
+#include <map>
+#include <unordered_set>
+#include <vector>
+
+#include "art_field-inl.h"
+#include "debug/elf_compilation_unit.h"
+#include "debug/elf_debug_loc_writer.h"
+#include "debug/method_debug_info.h"
+#include "dex/code_item_accessors-inl.h"
+#include "dex/dex_file-inl.h"
+#include "dex/dex_file.h"
+#include "dwarf/debug_abbrev_writer.h"
+#include "dwarf/debug_info_entry_writer.h"
+#include "elf/elf_builder.h"
+#include "heap_poisoning.h"
+#include "linear_alloc.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
+#include "oat_file.h"
+#include "obj_ptr-inl.h"
+
+namespace art {
+namespace debug {
+
+// Extract parameter names of the method from the dex debug info, if available.
+static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
+  std::vector<const char*> names;
+  DCHECK(mi->dex_file != nullptr);
+  CodeItemDebugInfoAccessor accessor(*mi->dex_file, mi->code_item, mi->dex_method_index);
+  if (accessor.HasCodeItem()) {
+    accessor.VisitParameterNames([&](const dex::StringIndex& id) {
+      names.push_back(mi->dex_file->StringDataByIdx(id));
+    });
+  }
+  return names;
+}
+
+// Helper class to write .debug_info and its supporting sections.
+template <typename ElfTypes>
+class ElfDebugInfoWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfDebugInfoWriter(ElfBuilder<ElfTypes>* builder)
+      : builder_(builder),
+        debug_abbrev_(&debug_abbrev_buffer_) {
+  }
+
+  void Start() {
+    builder_->GetDebugInfo()->Start();
+  }
+
+  void End() {
+    builder_->GetDebugInfo()->End();
+    builder_->WriteSection(".debug_abbrev", &debug_abbrev_buffer_);
+    if (!debug_loc_.empty()) {
+      builder_->WriteSection(".debug_loc", &debug_loc_);
+    }
+    if (!debug_ranges_.empty()) {
+      builder_->WriteSection(".debug_ranges", &debug_ranges_);
+    }
+  }
+
+ private:
+  ElfBuilder<ElfTypes>* builder_;
+  std::vector<uint8_t> debug_abbrev_buffer_;
+  dwarf::DebugAbbrevWriter<> debug_abbrev_;
+  std::vector<uint8_t> debug_loc_;
+  std::vector<uint8_t> debug_ranges_;
+
+  std::unordered_set<const char*> defined_dex_classes_;  // For CHECKs only.
+
+  template <typename ElfTypes2>
+  friend class ElfCompilationUnitWriter;
+};
+
+// Helper class to write one compilation unit.
+// It holds helper methods and temporary state.
+template <typename ElfTypes>
+class ElfCompilationUnitWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfCompilationUnitWriter(ElfDebugInfoWriter<ElfTypes>* owner)
+      : owner_(owner),
+        info_(Is64BitInstructionSet(owner_->builder_->GetIsa()), &owner->debug_abbrev_) {
+  }
+
+  void Write(const ElfCompilationUnit& compilation_unit) {
+    CHECK(!compilation_unit.methods.empty());
+    const Elf_Addr base_address = compilation_unit.is_code_address_text_relative
+        ? owner_->builder_->GetText()->GetAddress()
+        : 0;
+    const bool is64bit = Is64BitInstructionSet(owner_->builder_->GetIsa());
+    using namespace dwarf;  // NOLINT. For easy access to DWARF constants.
+
+    info_.StartTag(DW_TAG_compile_unit);
+    info_.WriteString(DW_AT_producer, "Android dex2oat");
+    info_.WriteData1(DW_AT_language, DW_LANG_Java);
+    info_.WriteString(DW_AT_comp_dir, "$JAVA_SRC_ROOT");
+    // The low_pc acts as base address for several other addresses/ranges.
+    info_.WriteAddr(DW_AT_low_pc, base_address + compilation_unit.code_address);
+    info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset);
+
+    // Write .debug_ranges entries covering code ranges of the whole compilation unit.
+    dwarf::Writer<> debug_ranges(&owner_->debug_ranges_);
+    info_.WriteSecOffset(DW_AT_ranges, owner_->debug_ranges_.size());
+    for (auto mi : compilation_unit.methods) {
+      uint64_t low_pc = mi->code_address - compilation_unit.code_address;
+      uint64_t high_pc = low_pc + mi->code_size;
+      if (is64bit) {
+        debug_ranges.PushUint64(low_pc);
+        debug_ranges.PushUint64(high_pc);
+      } else {
+        debug_ranges.PushUint32(low_pc);
+        debug_ranges.PushUint32(high_pc);
+      }
+    }
+    if (is64bit) {
+      debug_ranges.PushUint64(0);  // End of list.
+      debug_ranges.PushUint64(0);
+    } else {
+      debug_ranges.PushUint32(0);  // End of list.
+      debug_ranges.PushUint32(0);
+    }
+
+    const char* last_dex_class_desc = nullptr;
+    for (auto mi : compilation_unit.methods) {
+      DCHECK(mi->dex_file != nullptr);
+      const DexFile* dex = mi->dex_file;
+      CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
+      const dex::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
+      const dex::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
+      const dex::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
+      const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method);
+      const bool is_static = (mi->access_flags & kAccStatic) != 0;
+
+      // Enclose the method in correct class definition.
+      if (last_dex_class_desc != dex_class_desc) {
+        if (last_dex_class_desc != nullptr) {
+          EndClassTag();
+        }
+        // Write reference tag for the class we are about to declare.
+        size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type);
+        type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset);
+        size_t type_attrib_offset = info_.size();
+        info_.WriteRef4(DW_AT_type, 0);
+        info_.EndTag();
+        // Declare the class that owns this method.
+        size_t class_offset = StartClassTag(dex_class_desc);
+        info_.UpdateUint32(type_attrib_offset, class_offset);
+        info_.WriteFlagPresent(DW_AT_declaration);
+        // Check that each class is defined only once.
+        bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second;
+        CHECK(unique) << "Redefinition of " << dex_class_desc;
+        last_dex_class_desc = dex_class_desc;
+      }
+
+      int start_depth = info_.Depth();
+      info_.StartTag(DW_TAG_subprogram);
+      WriteName(dex->GetMethodName(dex_method));
+      info_.WriteAddr(DW_AT_low_pc, base_address + mi->code_address);
+      info_.WriteUdata(DW_AT_high_pc, mi->code_size);
+      std::vector<uint8_t> expr_buffer;
+      Expression expr(&expr_buffer);
+      expr.WriteOpCallFrameCfa();
+      info_.WriteExprLoc(DW_AT_frame_base, expr);
+      WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
+
+      // Decode dex register locations for all stack maps.
+      // It might be expensive, so do it just once and reuse the result.
+      std::unique_ptr<CodeInfo> code_info;
+      std::vector<DexRegisterMap> dex_reg_maps;
+      if (accessor.HasCodeItem() && mi->code_info != nullptr) {
+        code_info.reset(new CodeInfo(mi->code_info));
+        for (StackMap stack_map : code_info->GetStackMaps()) {
+          dex_reg_maps.push_back(code_info->GetDexRegisterMapOf(stack_map));
+        }
+      }
+
+      // Write parameters. DecodeDebugLocalInfo returns them as well, but it does not
+      // guarantee order or uniqueness so it is safer to iterate over them manually.
+      // DecodeDebugLocalInfo might not also be available if there is no debug info.
+      std::vector<const char*> param_names = GetParamNames(mi);
+      uint32_t arg_reg = 0;
+      if (!is_static) {
+        info_.StartTag(DW_TAG_formal_parameter);
+        WriteName("this");
+        info_.WriteFlagPresent(DW_AT_artificial);
+        WriteLazyType(dex_class_desc);
+        if (accessor.HasCodeItem()) {
+          // Write the stack location of the parameter.
+          const uint32_t vreg = accessor.RegistersSize() - accessor.InsSize() + arg_reg;
+          const bool is64bitValue = false;
+          WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address);
+        }
+        arg_reg++;
+        info_.EndTag();
+      }
+      if (dex_params != nullptr) {
+        for (uint32_t i = 0; i < dex_params->Size(); ++i) {
+          info_.StartTag(DW_TAG_formal_parameter);
+          // Parameter names may not be always available.
+          if (i < param_names.size()) {
+            WriteName(param_names[i]);
+          }
+          // Write the type.
+          const char* type_desc = dex->StringByTypeIdx(dex_params->GetTypeItem(i).type_idx_);
+          WriteLazyType(type_desc);
+          const bool is64bitValue = type_desc[0] == 'D' || type_desc[0] == 'J';
+          if (accessor.HasCodeItem()) {
+            // Write the stack location of the parameter.
+            const uint32_t vreg = accessor.RegistersSize() - accessor.InsSize() + arg_reg;
+            WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address);
+          }
+          arg_reg += is64bitValue ? 2 : 1;
+          info_.EndTag();
+        }
+        if (accessor.HasCodeItem()) {
+          DCHECK_EQ(arg_reg, accessor.InsSize());
+        }
+      }
+
+      // Write local variables.
+      std::vector<DexFile::LocalInfo> local_infos;
+      if (accessor.DecodeDebugLocalInfo(is_static,
+                                        mi->dex_method_index,
+                                        [&](const DexFile::LocalInfo& entry) {
+                                          local_infos.push_back(entry);
+                                        })) {
+        for (const DexFile::LocalInfo& var : local_infos) {
+          if (var.reg_ < accessor.RegistersSize() - accessor.InsSize()) {
+            info_.StartTag(DW_TAG_variable);
+            WriteName(var.name_);
+            WriteLazyType(var.descriptor_);
+            bool is64bitValue = var.descriptor_[0] == 'D' || var.descriptor_[0] == 'J';
+            WriteRegLocation(mi,
+                             dex_reg_maps,
+                             var.reg_,
+                             is64bitValue,
+                             compilation_unit.code_address,
+                             var.start_address_,
+                             var.end_address_);
+            info_.EndTag();
+          }
+        }
+      }
+
+      info_.EndTag();
+      CHECK_EQ(info_.Depth(), start_depth);  // Balanced start/end.
+    }
+    if (last_dex_class_desc != nullptr) {
+      EndClassTag();
+    }
+    FinishLazyTypes();
+    CloseNamespacesAboveDepth(0);
+    info_.EndTag();  // DW_TAG_compile_unit
+    CHECK_EQ(info_.Depth(), 0);
+    std::vector<uint8_t> buffer;
+    buffer.reserve(info_.data()->size() + KB);
+    // All compilation units share single table which is at the start of .debug_abbrev.
+    const size_t debug_abbrev_offset = 0;
+    WriteDebugInfoCU(debug_abbrev_offset, info_, &buffer);
+    owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+  }
+
+  void Write(const ArrayRef<mirror::Class*>& types) REQUIRES_SHARED(Locks::mutator_lock_) {
+    using namespace dwarf;  // NOLINT. For easy access to DWARF constants.
+
+    info_.StartTag(DW_TAG_compile_unit);
+    info_.WriteString(DW_AT_producer, "Android dex2oat");
+    info_.WriteData1(DW_AT_language, DW_LANG_Java);
+
+    // Base class references to be patched at the end.
+    std::map<size_t, mirror::Class*> base_class_references;
+
+    // Already written declarations or definitions.
+    std::map<mirror::Class*, size_t> class_declarations;
+
+    std::vector<uint8_t> expr_buffer;
+    for (mirror::Class* type : types) {
+      if (type->IsPrimitive()) {
+        // For primitive types the definition and the declaration is the same.
+        if (type->GetPrimitiveType() != Primitive::kPrimVoid) {
+          WriteTypeDeclaration(type->GetDescriptor(nullptr));
+        }
+      } else if (type->IsArrayClass()) {
+        ObjPtr<mirror::Class> element_type = type->GetComponentType();
+        uint32_t component_size = type->GetComponentSize();
+        uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
+        uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+        CloseNamespacesAboveDepth(0);  // Declare in root namespace.
+        info_.StartTag(DW_TAG_array_type);
+        std::string descriptor_string;
+        WriteLazyType(element_type->GetDescriptor(&descriptor_string));
+        WriteLinkageName(type);
+        info_.WriteUdata(DW_AT_data_member_location, data_offset);
+        info_.StartTag(DW_TAG_subrange_type);
+        Expression count_expr(&expr_buffer);
+        count_expr.WriteOpPushObjectAddress();
+        count_expr.WriteOpPlusUconst(length_offset);
+        count_expr.WriteOpDerefSize(4);  // Array length is always 32-bit wide.
+        info_.WriteExprLoc(DW_AT_count, count_expr);
+        info_.EndTag();  // DW_TAG_subrange_type.
+        info_.EndTag();  // DW_TAG_array_type.
+      } else if (type->IsInterface()) {
+        // Skip. Variables cannot have an interface as a dynamic type.
+        // We do not expose the interface information to the debugger in any way.
+      } else {
+        std::string descriptor_string;
+        const char* desc = type->GetDescriptor(&descriptor_string);
+        size_t class_offset = StartClassTag(desc);
+        class_declarations.emplace(type, class_offset);
+
+        if (!type->IsVariableSize()) {
+          info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize());
+        }
+
+        WriteLinkageName(type);
+
+        if (type->IsObjectClass()) {
+          // Generate artificial member which is used to get the dynamic type of variable.
+          // The run-time value of this field will correspond to linkage name of some type.
+          // We need to do it only once in j.l.Object since all other types inherit it.
+          info_.StartTag(DW_TAG_member);
+          WriteName(".dynamic_type");
+          WriteLazyType(sizeof(uintptr_t) == 8 ? "J" : "I");
+          info_.WriteFlagPresent(DW_AT_artificial);
+          // Create DWARF expression to get the value of the methods_ field.
+          Expression expr(&expr_buffer);
+          // The address of the object has been implicitly pushed on the stack.
+          // Dereference the klass_ field of Object (32-bit; possibly poisoned).
+          DCHECK_EQ(type->ClassOffset().Uint32Value(), 0u);
+          DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), 4u);
+          expr.WriteOpDerefSize(4);
+          if (kPoisonHeapReferences) {
+            expr.WriteOpNeg();
+            // DWARF stack is pointer sized. Ensure that the high bits are clear.
+            expr.WriteOpConstu(0xFFFFFFFF);
+            expr.WriteOpAnd();
+          }
+          // Add offset to the methods_ field.
+          expr.WriteOpPlusUconst(mirror::Class::MethodsOffset().Uint32Value());
+          // Top of stack holds the location of the field now.
+          info_.WriteExprLoc(DW_AT_data_member_location, expr);
+          info_.EndTag();  // DW_TAG_member.
+        }
+
+        // Base class.
+        ObjPtr<mirror::Class> base_class = type->GetSuperClass();
+        if (base_class != nullptr) {
+          info_.StartTag(DW_TAG_inheritance);
+          base_class_references.emplace(info_.size(), base_class.Ptr());
+          info_.WriteRef4(DW_AT_type, 0);
+          info_.WriteUdata(DW_AT_data_member_location, 0);
+          info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+          info_.EndTag();  // DW_TAG_inheritance.
+        }
+
+        // Member variables.
+        for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) {
+          ArtField* field = type->GetInstanceField(i);
+          info_.StartTag(DW_TAG_member);
+          WriteName(field->GetName());
+          WriteLazyType(field->GetTypeDescriptor());
+          info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value());
+          uint32_t access_flags = field->GetAccessFlags();
+          if (access_flags & kAccPublic) {
+            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+          } else if (access_flags & kAccProtected) {
+            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected);
+          } else if (access_flags & kAccPrivate) {
+            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+          }
+          info_.EndTag();  // DW_TAG_member.
+        }
+
+        if (type->IsStringClass()) {
+          // Emit debug info about an artifical class member for java.lang.String which represents
+          // the first element of the data stored in a string instance. Consumers of the debug
+          // info will be able to read the content of java.lang.String based on the count (real
+          // field) and based on the location of this data member.
+          info_.StartTag(DW_TAG_member);
+          WriteName("value");
+          // We don't support fields with C like array types so we just say its type is java char.
+          WriteLazyType("C");  // char.
+          info_.WriteUdata(DW_AT_data_member_location,
+                           mirror::String::ValueOffset().Uint32Value());
+          info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+          info_.EndTag();  // DW_TAG_member.
+        }
+
+        EndClassTag();
+      }
+    }
+
+    // Write base class declarations.
+    for (const auto& base_class_reference : base_class_references) {
+      size_t reference_offset = base_class_reference.first;
+      mirror::Class* base_class = base_class_reference.second;
+      const auto it = class_declarations.find(base_class);
+      if (it != class_declarations.end()) {
+        info_.UpdateUint32(reference_offset, it->second);
+      } else {
+        // Declare base class. We can not use the standard WriteLazyType
+        // since we want to avoid the DW_TAG_reference_tag wrapping.
+        std::string tmp_storage;
+        const char* base_class_desc = base_class->GetDescriptor(&tmp_storage);
+        size_t base_class_declaration_offset = StartClassTag(base_class_desc);
+        info_.WriteFlagPresent(DW_AT_declaration);
+        WriteLinkageName(base_class);
+        EndClassTag();
+        class_declarations.emplace(base_class, base_class_declaration_offset);
+        info_.UpdateUint32(reference_offset, base_class_declaration_offset);
+      }
+    }
+
+    FinishLazyTypes();
+    CloseNamespacesAboveDepth(0);
+    info_.EndTag();  // DW_TAG_compile_unit.
+    CHECK_EQ(info_.Depth(), 0);
+    std::vector<uint8_t> buffer;
+    buffer.reserve(info_.data()->size() + KB);
+    // All compilation units share single table which is at the start of .debug_abbrev.
+    const size_t debug_abbrev_offset = 0;
+    WriteDebugInfoCU(debug_abbrev_offset, info_, &buffer);
+    owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+  }
+
+  // Write table into .debug_loc which describes location of dex register.
+  // The dex register might be valid only at some points and it might
+  // move between machine registers and stack.
+  void WriteRegLocation(const MethodDebugInfo* method_info,
+                        const std::vector<DexRegisterMap>& dex_register_maps,
+                        uint16_t vreg,
+                        bool is64bitValue,
+                        uint64_t compilation_unit_code_address,
+                        uint32_t dex_pc_low = 0,
+                        uint32_t dex_pc_high = 0xFFFFFFFF) {
+    WriteDebugLocEntry(method_info,
+                       dex_register_maps,
+                       vreg,
+                       is64bitValue,
+                       compilation_unit_code_address,
+                       dex_pc_low,
+                       dex_pc_high,
+                       owner_->builder_->GetIsa(),
+                       &info_,
+                       &owner_->debug_loc_,
+                       &owner_->debug_ranges_);
+  }
+
+  // Linkage name uniquely identifies type.
+  // It is used to determine the dynamic type of objects.
+  // We use the methods_ field of class since it is unique and it is not moved by the GC.
+  void WriteLinkageName(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
+    auto* methods_ptr = type->GetMethodsPtr();
+    if (methods_ptr == nullptr) {
+      // Some types might have no methods. Allocate empty array instead.
+      LinearAlloc* allocator = Runtime::Current()->GetLinearAlloc();
+      void* storage = allocator->Alloc(Thread::Current(), sizeof(LengthPrefixedArray<ArtMethod>));
+      methods_ptr = new (storage) LengthPrefixedArray<ArtMethod>(0);
+      type->SetMethodsPtr(methods_ptr, 0, 0);
+      DCHECK(type->GetMethodsPtr() != nullptr);
+    }
+    char name[32];
+    snprintf(name, sizeof(name), "0x%" PRIXPTR, reinterpret_cast<uintptr_t>(methods_ptr));
+    info_.WriteString(dwarf::DW_AT_linkage_name, name);
+  }
+
+  // Some types are difficult to define as we go since they need
+  // to be enclosed in the right set of namespaces. Therefore we
+  // just define all types lazily at the end of compilation unit.
+  void WriteLazyType(const char* type_descriptor) {
+    if (type_descriptor != nullptr && type_descriptor[0] != 'V') {
+      lazy_types_.emplace(std::string(type_descriptor), info_.size());
+      info_.WriteRef4(dwarf::DW_AT_type, 0);
+    }
+  }
+
+  void FinishLazyTypes() {
+    for (const auto& lazy_type : lazy_types_) {
+      info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first));
+    }
+    lazy_types_.clear();
+  }
+
+ private:
+  void WriteName(const char* name) {
+    if (name != nullptr) {
+      info_.WriteString(dwarf::DW_AT_name, name);
+    }
+  }
+
+  // Convert dex type descriptor to DWARF.
+  // Returns offset in the compilation unit.
+  size_t WriteTypeDeclaration(const std::string& desc) {
+    using namespace dwarf;  // NOLINT. For easy access to DWARF constants.
+
+    DCHECK(!desc.empty());
+    const auto it = type_cache_.find(desc);
+    if (it != type_cache_.end()) {
+      return it->second;
+    }
+
+    size_t offset;
+    if (desc[0] == 'L') {
+      // Class type. For example: Lpackage/name;
+      size_t class_offset = StartClassTag(desc.c_str());
+      info_.WriteFlagPresent(DW_AT_declaration);
+      EndClassTag();
+      // Reference to the class type.
+      offset = info_.StartTag(DW_TAG_reference_type);
+      info_.WriteRef(DW_AT_type, class_offset);
+      info_.EndTag();
+    } else if (desc[0] == '[') {
+      // Array type.
+      size_t element_type = WriteTypeDeclaration(desc.substr(1));
+      CloseNamespacesAboveDepth(0);  // Declare in root namespace.
+      size_t array_type = info_.StartTag(DW_TAG_array_type);
+      info_.WriteFlagPresent(DW_AT_declaration);
+      info_.WriteRef(DW_AT_type, element_type);
+      info_.EndTag();
+      offset = info_.StartTag(DW_TAG_reference_type);
+      info_.WriteRef4(DW_AT_type, array_type);
+      info_.EndTag();
+    } else {
+      // Primitive types.
+      DCHECK_EQ(desc.size(), 1u);
+
+      const char* name;
+      uint32_t encoding;
+      uint32_t byte_size;
+      switch (desc[0]) {
+        case 'B':
+          name = "byte";
+          encoding = DW_ATE_signed;
+          byte_size = 1;
+          break;
+        case 'C':
+          name = "char";
+          encoding = DW_ATE_UTF;
+          byte_size = 2;
+          break;
+        case 'D':
+          name = "double";
+          encoding = DW_ATE_float;
+          byte_size = 8;
+          break;
+        case 'F':
+          name = "float";
+          encoding = DW_ATE_float;
+          byte_size = 4;
+          break;
+        case 'I':
+          name = "int";
+          encoding = DW_ATE_signed;
+          byte_size = 4;
+          break;
+        case 'J':
+          name = "long";
+          encoding = DW_ATE_signed;
+          byte_size = 8;
+          break;
+        case 'S':
+          name = "short";
+          encoding = DW_ATE_signed;
+          byte_size = 2;
+          break;
+        case 'Z':
+          name = "boolean";
+          encoding = DW_ATE_boolean;
+          byte_size = 1;
+          break;
+        case 'V':
+          LOG(FATAL) << "Void type should not be encoded";
+          UNREACHABLE();
+        default:
+          LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\"";
+          UNREACHABLE();
+      }
+      CloseNamespacesAboveDepth(0);  // Declare in root namespace.
+      offset = info_.StartTag(DW_TAG_base_type);
+      WriteName(name);
+      info_.WriteData1(DW_AT_encoding, encoding);
+      info_.WriteData1(DW_AT_byte_size, byte_size);
+      info_.EndTag();
+    }
+
+    type_cache_.emplace(desc, offset);
+    return offset;
+  }
+
+  // Start DW_TAG_class_type tag nested in DW_TAG_namespace tags.
+  // Returns offset of the class tag in the compilation unit.
+  size_t StartClassTag(const char* desc) {
+    std::string name = SetNamespaceForClass(desc);
+    size_t offset = info_.StartTag(dwarf::DW_TAG_class_type);
+    WriteName(name.c_str());
+    return offset;
+  }
+
+  void EndClassTag() {
+    info_.EndTag();
+  }
+
+  // Set the current namespace nesting to one required by the given class.
+  // Returns the class name with namespaces, 'L', and ';' stripped.
+  std::string SetNamespaceForClass(const char* desc) {
+    DCHECK(desc != nullptr && desc[0] == 'L');
+    desc++;  // Skip the initial 'L'.
+    size_t depth = 0;
+    for (const char* end; (end = strchr(desc, '/')) != nullptr; desc = end + 1, ++depth) {
+      // Check whether the name at this depth is already what we need.
+      if (depth < current_namespace_.size()) {
+        const std::string& name = current_namespace_[depth];
+        if (name.compare(0, name.size(), desc, end - desc) == 0) {
+          continue;
+        }
+      }
+      // Otherwise we need to open a new namespace tag at this depth.
+      CloseNamespacesAboveDepth(depth);
+      info_.StartTag(dwarf::DW_TAG_namespace);
+      std::string name(desc, end - desc);
+      WriteName(name.c_str());
+      current_namespace_.push_back(std::move(name));
+    }
+    CloseNamespacesAboveDepth(depth);
+    return std::string(desc, strchr(desc, ';') - desc);
+  }
+
+  // Close namespace tags to reach the given nesting depth.
+  void CloseNamespacesAboveDepth(size_t depth) {
+    DCHECK_LE(depth, current_namespace_.size());
+    while (current_namespace_.size() > depth) {
+      info_.EndTag();
+      current_namespace_.pop_back();
+    }
+  }
+
+  // For access to the ELF sections.
+  ElfDebugInfoWriter<ElfTypes>* owner_;
+  // Temporary buffer to create and store the entries.
+  dwarf::DebugInfoEntryWriter<> info_;
+  // Cache of already translated type descriptors.
+  std::map<std::string, size_t> type_cache_;  // type_desc -> definition_offset.
+  // 32-bit references which need to be resolved to a type later.
+  // Given type may be used multiple times. Therefore we need a multimap.
+  std::multimap<std::string, size_t> lazy_types_;  // type_desc -> patch_offset.
+  // The current set of open namespace tags which are active and not closed yet.
+  std::vector<std::string> current_namespace_;
+};
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
new file mode 100644
index 0000000..e7b2a1b
--- /dev/null
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_
+
+#include <unordered_map>
+#include <vector>
+
+#include "debug/elf_compilation_unit.h"
+#include "debug/src_map_elem.h"
+#include "dex/dex_file-inl.h"
+#include "dwarf/debug_line_opcode_writer.h"
+#include "dwarf/headers.h"
+#include "elf/elf_builder.h"
+#include "oat_file.h"
+#include "stack_map.h"
+
+namespace art {
+namespace debug {
+
+typedef std::vector<DexFile::PositionInfo> PositionInfos;
+
+template <typename ElfTypes>
+class ElfDebugLineWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfDebugLineWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
+  }
+
+  void Start() {
+    builder_->GetDebugLine()->Start();
+  }
+
+  // Write line table for given set of methods.
+  // Returns the number of bytes written.
+ size_t WriteCompilationUnit(ElfCompilationUnit& compilation_unit) { + const InstructionSet isa = builder_->GetIsa(); + const bool is64bit = Is64BitInstructionSet(isa); + const Elf_Addr base_address = compilation_unit.is_code_address_text_relative + ? builder_->GetText()->GetAddress() + : 0; + + compilation_unit.debug_line_offset = builder_->GetDebugLine()->GetPosition(); + + std::vector files; + std::unordered_map files_map; + std::vector directories; + std::unordered_map directories_map; + int code_factor_bits_ = 0; + int dwarf_isa = -1; + switch (isa) { + case InstructionSet::kArm: // arm actually means thumb2. + case InstructionSet::kThumb2: + code_factor_bits_ = 1; // 16-bit instuctions + dwarf_isa = 1; // DW_ISA_ARM_thumb. + break; + case InstructionSet::kArm64: + code_factor_bits_ = 2; // 32-bit instructions + break; + case InstructionSet::kNone: + case InstructionSet::kX86: + case InstructionSet::kX86_64: + break; + } + std::unordered_set seen_addresses(compilation_unit.methods.size()); + dwarf::DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits_); + for (const MethodDebugInfo* mi : compilation_unit.methods) { + // Ignore function if we have already generated line table for the same address. + // It would confuse the debugger and the DWARF specification forbids it. + // We allow the line table for method to be replicated in different compilation unit. + // This ensures that each compilation unit contains line table for all its methods. + if (!seen_addresses.insert(mi->code_address).second) { + continue; + } + + uint32_t prologue_end = std::numeric_limits::max(); + std::vector pc2dex_map; + if (mi->code_info != nullptr) { + // Use stack maps to create mapping table from pc to dex. 
+ const CodeInfo code_info(mi->code_info); + pc2dex_map.reserve(code_info.GetNumberOfStackMaps()); + for (StackMap stack_map : code_info.GetStackMaps()) { + const uint32_t pc = stack_map.GetNativePcOffset(isa); + const int32_t dex = stack_map.GetDexPc(); + pc2dex_map.push_back({pc, dex}); + if (stack_map.HasDexRegisterMap()) { + // Guess that the first map with local variables is the end of prologue. + prologue_end = std::min(prologue_end, pc); + } + } + std::sort(pc2dex_map.begin(), pc2dex_map.end()); + } + + if (pc2dex_map.empty()) { + continue; + } + + // Compensate for compiler's off-by-one-instruction error. + // + // The compiler generates stackmap with PC *after* the branch instruction + // (because this is the PC which is easier to obtain when unwinding). + // + // However, the debugger is more clever and it will ask us for line-number + // mapping at the location of the branch instruction (since the following + // instruction could belong to other line, this is the correct thing to do). + // + // So we really want to just decrement the PC by one instruction so that the + // branch instruction is covered as well. However, we do not know the size + // of the previous instruction, and we can not subtract just a fixed amount + // (the debugger would trust us that the PC is valid; it might try to set + // breakpoint there at some point, and setting breakpoint in mid-instruction + // would make the process crash in spectacular way). + // + // Therefore, we say that the PC which the compiler gave us for the stackmap + // is the end of its associated address range, and we use the PC from the + // previous stack map as the start of the range. This ensures that the PC is + // valid and that the branch instruction is covered. + // + // This ensures we have correct line number mapping at call sites (which is + // important for backtraces), but there is nothing we can do for non-call + // sites (so stepping through optimized code in debugger is not possible). 
+ // + // We do not adjust the stackmaps if the code was compiled as debuggable. + // In that case, the stackmaps should accurately cover all instructions. + if (!mi->is_native_debuggable) { + for (size_t i = pc2dex_map.size() - 1; i > 0; --i) { + pc2dex_map[i].from_ = pc2dex_map[i - 1].from_; + } + pc2dex_map[0].from_ = 0; + } + + Elf_Addr method_address = base_address + mi->code_address; + + PositionInfos dex2line_map; + const DexFile* dex = mi->dex_file; + DCHECK(dex != nullptr); + CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index); + if (!accessor.DecodeDebugPositionInfo( + [&](const DexFile::PositionInfo& entry) { + dex2line_map.push_back(entry); + return false; + })) { + continue; + } + + if (dex2line_map.empty()) { + continue; + } + + opcodes.SetAddress(method_address); + if (dwarf_isa != -1) { + opcodes.SetISA(dwarf_isa); + } + + // Get and deduplicate directory and filename. + int file_index = 0; // 0 - primary source file of the compilation. + auto& dex_class_def = dex->GetClassDef(mi->class_def_index); + const char* source_file = dex->GetSourceFile(dex_class_def); + if (source_file != nullptr) { + std::string file_name(source_file); + size_t file_name_slash = file_name.find_last_of('/'); + std::string class_name(dex->GetClassDescriptor(dex_class_def)); + size_t class_name_slash = class_name.find_last_of('/'); + std::string full_path(file_name); + + // Guess directory from package name. + int directory_index = 0; // 0 - current directory of the compilation. + if (file_name_slash == std::string::npos && // Just filename. + class_name.front() == 'L' && // Type descriptor for a class. + class_name_slash != std::string::npos) { // Has package name. 
+ std::string package_name = class_name.substr(1, class_name_slash - 1); + auto it = directories_map.find(package_name); + if (it == directories_map.end()) { + directory_index = 1 + directories.size(); + directories_map.emplace(package_name, directory_index); + directories.push_back(package_name); + } else { + directory_index = it->second; + } + full_path = package_name + "/" + file_name; + } + + // Add file entry. + auto it2 = files_map.find(full_path); + if (it2 == files_map.end()) { + file_index = 1 + files.size(); + files_map.emplace(full_path, file_index); + files.push_back(dwarf::FileEntry { + file_name, + directory_index, + 0, // Modification time - NA. + 0, // File size - NA. + }); + } else { + file_index = it2->second; + } + } + opcodes.SetFile(file_index); + + // Generate mapping opcodes from PC to Java lines. + if (file_index != 0) { + // If the method was not compiled as native-debuggable, we still generate all available + // lines, but we try to prevent the debugger from stepping and setting breakpoints since + // the information is too inaccurate for that (breakpoints would be set after the calls). + const bool default_is_stmt = mi->is_native_debuggable; + bool first = true; + for (SrcMapElem pc2dex : pc2dex_map) { + uint32_t pc = pc2dex.from_; + int dex_pc = pc2dex.to_; + // Find mapping with address with is greater than our dex pc; then go back one step. + auto dex2line = std::upper_bound( + dex2line_map.begin(), + dex2line_map.end(), + dex_pc, + [](uint32_t address, const DexFile::PositionInfo& entry) { + return address < entry.address_; + }); + // Look for first valid mapping after the prologue. + if (dex2line != dex2line_map.begin() && pc >= prologue_end) { + int line = (--dex2line)->line_; + if (first) { + first = false; + if (pc > 0) { + // Assume that any preceding code is prologue. + int first_line = dex2line_map.front().line_; + // Prologue is not a sensible place for a breakpoint. 
+ opcodes.SetIsStmt(false); + opcodes.AddRow(method_address, first_line); + opcodes.SetPrologueEnd(); + } + opcodes.SetIsStmt(default_is_stmt); + opcodes.AddRow(method_address + pc, line); + } else if (line != opcodes.CurrentLine()) { + opcodes.SetIsStmt(default_is_stmt); + opcodes.AddRow(method_address + pc, line); + } + } + } + } else { + // line 0 - instruction cannot be attributed to any source line. + opcodes.AddRow(method_address, 0); + } + + opcodes.AdvancePC(method_address + mi->code_size); + opcodes.EndSequence(); + } + std::vector buffer; + buffer.reserve(opcodes.data()->size() + KB); + WriteDebugLineTable(directories, files, opcodes, &buffer); + builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size()); + return buffer.size(); + } + + void End() { + builder_->GetDebugLine()->End(); + } + + private: + ElfBuilder* builder_; +}; + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_ + diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h new file mode 100644 index 0000000..37ab948 --- /dev/null +++ b/compiler/debug/elf_debug_loc_writer.h @@ -0,0 +1,331 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ +#define ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ + +#include +#include + +#include "arch/instruction_set.h" +#include "compiled_method.h" +#include "debug/method_debug_info.h" +#include "dwarf/debug_info_entry_writer.h" +#include "dwarf/register.h" +#include "stack_map.h" + +namespace art { +namespace debug { +using Reg = dwarf::Reg; + +static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) { + switch (isa) { + case InstructionSet::kArm: + case InstructionSet::kThumb2: + return Reg::ArmCore(machine_reg); + case InstructionSet::kArm64: + return Reg::Arm64Core(machine_reg); + case InstructionSet::kX86: + return Reg::X86Core(machine_reg); + case InstructionSet::kX86_64: + return Reg::X86_64Core(machine_reg); + case InstructionSet::kNone: + LOG(FATAL) << "No instruction set"; + } + UNREACHABLE(); +} + +static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) { + switch (isa) { + case InstructionSet::kArm: + case InstructionSet::kThumb2: + return Reg::ArmFp(machine_reg); + case InstructionSet::kArm64: + return Reg::Arm64Fp(machine_reg); + case InstructionSet::kX86: + return Reg::X86Fp(machine_reg); + case InstructionSet::kX86_64: + return Reg::X86_64Fp(machine_reg); + case InstructionSet::kNone: + LOG(FATAL) << "No instruction set"; + } + UNREACHABLE(); +} + +struct VariableLocation { + uint32_t low_pc; // Relative to compilation unit. + uint32_t high_pc; // Relative to compilation unit. + DexRegisterLocation reg_lo; // May be None if the location is unknown. + DexRegisterLocation reg_hi; // Most significant bits of 64-bit value. +}; + +// Get the location of given dex register (e.g. stack or machine register). +// Note that the location might be different based on the current pc. +// The result will cover all ranges where the variable is in scope. +// PCs corresponding to stackmap with dex register map are accurate, +// all other PCs are best-effort only. 
+static std::vector GetVariableLocations( + const MethodDebugInfo* method_info, + const std::vector& dex_register_maps, + uint16_t vreg, + bool is64bitValue, + uint64_t compilation_unit_code_address, + uint32_t dex_pc_low, + uint32_t dex_pc_high, + InstructionSet isa) { + std::vector variable_locations; + + // Get stack maps sorted by pc (they might not be sorted internally). + // TODO(dsrbecky) Remove this once stackmaps get sorted by pc. + const CodeInfo code_info(method_info->code_info); + std::map stack_maps; // low_pc -> stack_map_index. + for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) { + StackMap stack_map = code_info.GetStackMapAt(s); + DCHECK(stack_map.IsValid()); + if (!stack_map.HasDexRegisterMap()) { + // The compiler creates stackmaps without register maps at the start of + // basic blocks in order to keep instruction-accurate line number mapping. + // However, we never stop at those (breakpoint locations always have map). + // Therefore, for the purpose of local variables, we ignore them. + // The main reason for this is to save space by avoiding undefined gaps. + continue; + } + const uint32_t pc_offset = stack_map.GetNativePcOffset(isa); + DCHECK_LE(pc_offset, method_info->code_size); + DCHECK_LE(compilation_unit_code_address, method_info->code_address); + const uint32_t low_pc = dchecked_integral_cast( + method_info->code_address + pc_offset - compilation_unit_code_address); + stack_maps.emplace(low_pc, s); + } + + // Create entries for the requested register based on stack map data. + for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) { + const uint32_t low_pc = it->first; + const uint32_t stack_map_index = it->second; + const StackMap stack_map = code_info.GetStackMapAt(stack_map_index); + auto next_it = it; + next_it++; + const uint32_t high_pc = next_it != stack_maps.end() + ? 
next_it->first + : method_info->code_address + method_info->code_size - compilation_unit_code_address; + DCHECK_LE(low_pc, high_pc); + if (low_pc == high_pc) { + continue; // Ignore if the address range is empty. + } + + // Check that the stack map is in the requested range. + uint32_t dex_pc = stack_map.GetDexPc(); + if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) { + // The variable is not in scope at this PC. Therefore omit the entry. + // Note that this is different to None() entry which means in scope, but unknown location. + continue; + } + + // Find the location of the dex register. + DexRegisterLocation reg_lo = DexRegisterLocation::None(); + DexRegisterLocation reg_hi = DexRegisterLocation::None(); + DCHECK_LT(stack_map_index, dex_register_maps.size()); + DexRegisterMap dex_register_map = dex_register_maps[stack_map_index]; + DCHECK(!dex_register_map.empty()); + CodeItemDataAccessor accessor(*method_info->dex_file, method_info->code_item); + reg_lo = dex_register_map[vreg]; + if (is64bitValue) { + reg_hi = dex_register_map[vreg + 1]; + } + + // Add location entry for this address range. + if (!variable_locations.empty() && + variable_locations.back().reg_lo == reg_lo && + variable_locations.back().reg_hi == reg_hi && + variable_locations.back().high_pc == low_pc) { + // Merge with the previous entry (extend its range). + variable_locations.back().high_pc = high_pc; + } else { + variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi}); + } + } + + return variable_locations; +} + +// Write table into .debug_loc which describes location of dex register. +// The dex register might be valid only at some points and it might +// move between machine registers and stack. 
+static void WriteDebugLocEntry(const MethodDebugInfo* method_info, + const std::vector& dex_register_maps, + uint16_t vreg, + bool is64bitValue, + uint64_t compilation_unit_code_address, + uint32_t dex_pc_low, + uint32_t dex_pc_high, + InstructionSet isa, + dwarf::DebugInfoEntryWriter<>* debug_info, + std::vector* debug_loc_buffer, + std::vector* debug_ranges_buffer) { + using Kind = DexRegisterLocation::Kind; + if (method_info->code_info == nullptr || dex_register_maps.empty()) { + return; + } + + std::vector variable_locations = GetVariableLocations( + method_info, + dex_register_maps, + vreg, + is64bitValue, + compilation_unit_code_address, + dex_pc_low, + dex_pc_high, + isa); + + // Write .debug_loc entries. + dwarf::Writer<> debug_loc(debug_loc_buffer); + const size_t debug_loc_offset = debug_loc.size(); + const bool is64bit = Is64BitInstructionSet(isa); + std::vector expr_buffer; + for (const VariableLocation& variable_location : variable_locations) { + // Translate dex register location to DWARF expression. + // Note that 64-bit value might be split to two distinct locations. + // (for example, two 32-bit machine registers, or even stack and register) + dwarf::Expression expr(&expr_buffer); + DexRegisterLocation reg_lo = variable_location.reg_lo; + DexRegisterLocation reg_hi = variable_location.reg_hi; + for (int piece = 0; piece < (is64bitValue ? 2 : 1); piece++) { + DexRegisterLocation reg_loc = (piece == 0 ? reg_lo : reg_hi); + const Kind kind = reg_loc.GetKind(); + const int32_t value = reg_loc.GetValue(); + if (kind == Kind::kInStack) { + // The stack offset is relative to SP. Make it relative to CFA. + expr.WriteOpFbreg(value - method_info->frame_size_in_bytes); + if (piece == 0 && reg_hi.GetKind() == Kind::kInStack && + reg_hi.GetValue() == value + 4) { + break; // the high word is correctly implied by the low word. 
+ } + } else if (kind == Kind::kInRegister) { + expr.WriteOpReg(GetDwarfCoreReg(isa, value).num()); + if (piece == 0 && reg_hi.GetKind() == Kind::kInRegisterHigh && + reg_hi.GetValue() == value) { + break; // the high word is correctly implied by the low word. + } + } else if (kind == Kind::kInFpuRegister) { + if ((isa == InstructionSet::kArm || isa == InstructionSet::kThumb2) && + piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister && + reg_hi.GetValue() == value + 1 && value % 2 == 0) { + // Translate S register pair to D register (e.g. S4+S5 to D2). + expr.WriteOpReg(Reg::ArmDp(value / 2).num()); + break; + } + expr.WriteOpReg(GetDwarfFpReg(isa, value).num()); + if (piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegisterHigh && + reg_hi.GetValue() == reg_lo.GetValue()) { + break; // the high word is correctly implied by the low word. + } + } else if (kind == Kind::kConstant) { + expr.WriteOpConsts(value); + expr.WriteOpStackValue(); + } else if (kind == Kind::kNone) { + break; + } else { + // kInStackLargeOffset and kConstantLargeValue are hidden by GetKind(). + // kInRegisterHigh and kInFpuRegisterHigh should be handled by + // the special cases above and they should not occur alone. + LOG(WARNING) << "Unexpected register location: " << kind + << " (This can indicate either a bug in the dexer when generating" + << " local variable information, or a bug in ART compiler." + << " Please file a bug at go/art-bug)"; + break; + } + if (is64bitValue) { + // Write the marker which is needed by split 64-bit values. + // This code is skipped by the special cases. + expr.WriteOpPiece(4); + } + } + + if (expr.size() > 0) { + if (is64bit) { + debug_loc.PushUint64(variable_location.low_pc); + debug_loc.PushUint64(variable_location.high_pc); + } else { + debug_loc.PushUint32(variable_location.low_pc); + debug_loc.PushUint32(variable_location.high_pc); + } + // Write the expression. 
+ debug_loc.PushUint16(expr.size()); + debug_loc.PushData(expr.data()); + } else { + // Do not generate .debug_loc if the location is not known. + } + } + // Write end-of-list entry. + if (is64bit) { + debug_loc.PushUint64(0); + debug_loc.PushUint64(0); + } else { + debug_loc.PushUint32(0); + debug_loc.PushUint32(0); + } + + // Write .debug_ranges entries. + // This includes ranges where the variable is in scope but the location is not known. + dwarf::Writer<> debug_ranges(debug_ranges_buffer); + size_t debug_ranges_offset = debug_ranges.size(); + for (size_t i = 0; i < variable_locations.size(); i++) { + uint32_t low_pc = variable_locations[i].low_pc; + uint32_t high_pc = variable_locations[i].high_pc; + while (i + 1 < variable_locations.size() && variable_locations[i+1].low_pc == high_pc) { + // Merge address range with the next entry. + high_pc = variable_locations[++i].high_pc; + } + if (is64bit) { + debug_ranges.PushUint64(low_pc); + debug_ranges.PushUint64(high_pc); + } else { + debug_ranges.PushUint32(low_pc); + debug_ranges.PushUint32(high_pc); + } + } + // Write end-of-list entry. + if (is64bit) { + debug_ranges.PushUint64(0); + debug_ranges.PushUint64(0); + } else { + debug_ranges.PushUint32(0); + debug_ranges.PushUint32(0); + } + + // Simple de-duplication - check whether this entry is same as the last one (or tail of it). + size_t debug_ranges_entry_size = debug_ranges.size() - debug_ranges_offset; + if (debug_ranges_offset >= debug_ranges_entry_size) { + size_t previous_offset = debug_ranges_offset - debug_ranges_entry_size; + if (memcmp(debug_ranges_buffer->data() + previous_offset, + debug_ranges_buffer->data() + debug_ranges_offset, + debug_ranges_entry_size) == 0) { + // Remove what we have just written and use the last entry instead. + debug_ranges_buffer->resize(debug_ranges_offset); + debug_ranges_offset = previous_offset; + } + } + + // Write attributes to .debug_info. 
+ debug_info->WriteSecOffset(dwarf::DW_AT_location, debug_loc_offset); + debug_info->WriteSecOffset(dwarf::DW_AT_start_scope, debug_ranges_offset); +} + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ + diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc new file mode 100644 index 0000000..765a81d --- /dev/null +++ b/compiler/debug/elf_debug_writer.cc @@ -0,0 +1,381 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "elf_debug_writer.h" + +#include +#include +#include + +#include "base/array_ref.h" +#include "base/stl_util.h" +#include "debug/elf_compilation_unit.h" +#include "debug/elf_debug_frame_writer.h" +#include "debug/elf_debug_info_writer.h" +#include "debug/elf_debug_line_writer.h" +#include "debug/elf_debug_loc_writer.h" +#include "debug/elf_symtab_writer.h" +#include "debug/method_debug_info.h" +#include "dwarf/dwarf_constants.h" +#include "elf/elf_builder.h" +#include "elf/elf_debug_reader.h" +#include "elf/elf_utils.h" +#include "elf/xz_utils.h" +#include "jit/debugger_interface.h" +#include "oat.h" +#include "stream/vector_output_stream.h" + +namespace art { +namespace debug { + +using ElfRuntimeTypes = std::conditional::type; + +template +void WriteDebugInfo(ElfBuilder* builder, + const DebugInfo& debug_info) { + // Write .strtab and .symtab. 
+ WriteDebugSymbols(builder, /* mini-debug-info= */ false, debug_info); + + // Write .debug_frame. + WriteCFISection(builder, debug_info.compiled_methods); + + // Group the methods into compilation units based on class. + std::unordered_map class_to_compilation_unit; + for (const MethodDebugInfo& mi : debug_info.compiled_methods) { + if (mi.dex_file != nullptr) { + auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index); + ElfCompilationUnit& cu = class_to_compilation_unit[&dex_class_def]; + cu.methods.push_back(&mi); + // All methods must have the same addressing mode otherwise the min/max below does not work. + DCHECK_EQ(cu.methods.front()->is_code_address_text_relative, mi.is_code_address_text_relative); + cu.is_code_address_text_relative = mi.is_code_address_text_relative; + cu.code_address = std::min(cu.code_address, mi.code_address); + cu.code_end = std::max(cu.code_end, mi.code_address + mi.code_size); + } + } + + // Sort compilation units to make the compiler output deterministic. + std::vector compilation_units; + compilation_units.reserve(class_to_compilation_unit.size()); + for (auto& it : class_to_compilation_unit) { + // The .debug_line section requires the methods to be sorted by code address. + std::stable_sort(it.second.methods.begin(), + it.second.methods.end(), + [](const MethodDebugInfo* a, const MethodDebugInfo* b) { + return a->code_address < b->code_address; + }); + compilation_units.push_back(std::move(it.second)); + } + std::sort(compilation_units.begin(), + compilation_units.end(), + [](ElfCompilationUnit& a, ElfCompilationUnit& b) { + // Sort by index of the first method within the method_infos array. + // This assumes that the order of method_infos is deterministic. + // Code address is not good for sorting due to possible duplicates. + return a.methods.front() < b.methods.front(); + }); + + // Write .debug_line section. 
+ if (!compilation_units.empty()) { + ElfDebugLineWriter line_writer(builder); + line_writer.Start(); + for (auto& compilation_unit : compilation_units) { + line_writer.WriteCompilationUnit(compilation_unit); + } + line_writer.End(); + } + + // Write .debug_info section. + if (!compilation_units.empty()) { + ElfDebugInfoWriter info_writer(builder); + info_writer.Start(); + for (const auto& compilation_unit : compilation_units) { + ElfCompilationUnitWriter cu_writer(&info_writer); + cu_writer.Write(compilation_unit); + } + info_writer.End(); + } +} + +template +static std::vector MakeMiniDebugInfoInternal( + InstructionSet isa, + const InstructionSetFeatures* features ATTRIBUTE_UNUSED, + typename ElfTypes::Addr text_section_address, + size_t text_section_size, + typename ElfTypes::Addr dex_section_address, + size_t dex_section_size, + const DebugInfo& debug_info) { + std::vector buffer; + buffer.reserve(KB); + VectorOutputStream out("Mini-debug-info ELF file", &buffer); + std::unique_ptr> builder(new ElfBuilder(isa, &out)); + builder->Start(/* write_program_headers= */ false); + // Mirror ELF sections as NOBITS since the added symbols will reference them. 
+ if (text_section_size != 0) { + builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size); + } + if (dex_section_size != 0) { + builder->GetDex()->AllocateVirtualMemory(dex_section_address, dex_section_size); + } + if (!debug_info.Empty()) { + WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info); + } + if (!debug_info.compiled_methods.empty()) { + WriteCFISection(builder.get(), debug_info.compiled_methods); + } + builder->End(); + CHECK(builder->Good()); + std::vector compressed_buffer; + compressed_buffer.reserve(buffer.size() / 4); + XzCompress(ArrayRef(buffer), &compressed_buffer); + return compressed_buffer; +} + +std::vector MakeMiniDebugInfo( + InstructionSet isa, + const InstructionSetFeatures* features, + uint64_t text_section_address, + size_t text_section_size, + uint64_t dex_section_address, + size_t dex_section_size, + const DebugInfo& debug_info) { + if (Is64BitInstructionSet(isa)) { + return MakeMiniDebugInfoInternal(isa, + features, + text_section_address, + text_section_size, + dex_section_address, + dex_section_size, + debug_info); + } else { + return MakeMiniDebugInfoInternal(isa, + features, + text_section_address, + text_section_size, + dex_section_address, + dex_section_size, + debug_info); + } +} + +std::vector MakeElfFileForJIT( + InstructionSet isa, + const InstructionSetFeatures* features ATTRIBUTE_UNUSED, + bool mini_debug_info, + const MethodDebugInfo& method_info) { + using ElfTypes = ElfRuntimeTypes; + CHECK_EQ(sizeof(ElfTypes::Addr), static_cast(GetInstructionSetPointerSize(isa))); + CHECK_EQ(method_info.is_code_address_text_relative, false); + DebugInfo debug_info{}; + debug_info.compiled_methods = ArrayRef(&method_info, 1); + std::vector buffer; + buffer.reserve(KB); + VectorOutputStream out("Debug ELF file", &buffer); + std::unique_ptr> builder(new ElfBuilder(isa, &out)); + // No program headers since the ELF file is not linked and has no allocated sections. 
+ builder->Start(/* write_program_headers= */ false); + builder->GetText()->AllocateVirtualMemory(method_info.code_address, method_info.code_size); + if (mini_debug_info) { + // The compression is great help for multiple methods but it is not worth it for a + // single method due to the overheads so skip the compression here for performance. + WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info); + WriteCFISection(builder.get(), debug_info.compiled_methods); + } else { + WriteDebugInfo(builder.get(), debug_info); + } + builder->End(); + CHECK(builder->Good()); + // Verify the ELF file by reading it back using the trivial reader. + if (kIsDebugBuild) { + using Elf_Sym = typename ElfTypes::Sym; + size_t num_syms = 0; + size_t num_cies = 0; + size_t num_fdes = 0; + using Reader = ElfDebugReader; + Reader reader(buffer); + reader.VisitFunctionSymbols([&](Elf_Sym sym, const char*) { + DCHECK_EQ(sym.st_value, method_info.code_address + CompiledMethod::CodeDelta(isa)); + DCHECK_EQ(sym.st_size, method_info.code_size); + num_syms++; + }); + reader.VisitDebugFrame([&](const Reader::CIE* cie ATTRIBUTE_UNUSED) { + num_cies++; + }, [&](const Reader::FDE* fde, const Reader::CIE* cie ATTRIBUTE_UNUSED) { + DCHECK_EQ(fde->sym_addr, method_info.code_address); + DCHECK_EQ(fde->sym_size, method_info.code_size); + num_fdes++; + }); + DCHECK_EQ(num_syms, 1u); + DCHECK_LE(num_cies, 1u); + DCHECK_LE(num_fdes, 1u); + } + return buffer; +} + +// Combine several mini-debug-info ELF files into one, while filtering some symbols. 
+std::vector PackElfFileForJIT( + ArrayRef jit_entries, + ArrayRef removed_symbols, + bool compress, + /*out*/ size_t* num_symbols) { + using ElfTypes = ElfRuntimeTypes; + using Elf_Addr = typename ElfTypes::Addr; + using Elf_Sym = typename ElfTypes::Sym; + const InstructionSet isa = kRuntimeISA; + CHECK_EQ(sizeof(Elf_Addr), static_cast(GetInstructionSetPointerSize(isa))); + const uint32_t kPcAlign = GetInstructionSetInstructionAlignment(isa); + auto is_pc_aligned = [](const void* pc) { return IsAligned(pc); }; + DCHECK(std::all_of(removed_symbols.begin(), removed_symbols.end(), is_pc_aligned)); + auto is_removed_symbol = [&removed_symbols](Elf_Addr addr) { + // Remove thumb-bit, if any (using the fact that address is instruction aligned). + const void* code_ptr = AlignDown(reinterpret_cast(addr), kPcAlign); + return std::binary_search(removed_symbols.begin(), removed_symbols.end(), code_ptr); + }; + uint64_t min_address = std::numeric_limits::max(); + uint64_t max_address = 0; + + // Produce the inner ELF file. + // It will contain the symbols (.symtab) and unwind information (.debug_frame). + std::vector inner_elf_file; + { + inner_elf_file.reserve(1 * KB); // Approximate size of ELF file with a single symbol. + VectorOutputStream out("Mini-debug-info ELF file for JIT", &inner_elf_file); + std::unique_ptr> builder(new ElfBuilder(isa, &out)); + builder->Start(/*write_program_headers=*/ false); + auto* text = builder->GetText(); + auto* strtab = builder->GetStrTab(); + auto* symtab = builder->GetSymTab(); + auto* debug_frame = builder->GetDebugFrame(); + std::deque symbols; + + using Reader = ElfDebugReader; + std::deque readers; + for (const JITCodeEntry* it : jit_entries) { + readers.emplace_back(GetJITCodeEntrySymFile(it)); + } + + // Write symbols names. All other data is buffered. + strtab->Start(); + strtab->Write(""); // strtab should start with empty string. 
+ for (Reader& reader : readers) { + reader.VisitFunctionSymbols([&](Elf_Sym sym, const char* name) { + if (is_removed_symbol(sym.st_value)) { + return; + } + sym.st_name = strtab->Write(name); + symbols.push_back(sym); + min_address = std::min(min_address, sym.st_value); + max_address = std::max(max_address, sym.st_value + sym.st_size); + }); + } + strtab->End(); + + // Create .text covering the code range. Needed for gdb to find the symbols. + if (max_address > min_address) { + text->AllocateVirtualMemory(min_address, max_address - min_address); + } + + // Add the symbols. + *num_symbols = symbols.size(); + for (; !symbols.empty(); symbols.pop_front()) { + symtab->Add(symbols.front(), text); + } + symtab->WriteCachedSection(); + + // Add the CFI/unwind section. + debug_frame->Start(); + // ART always produces the same CIE, so we copy the first one and ignore the rest. + bool copied_cie = false; + for (Reader& reader : readers) { + reader.VisitDebugFrame([&](const Reader::CIE* cie) { + if (!copied_cie) { + debug_frame->WriteFully(cie->data(), cie->size()); + copied_cie = true; + } + }, [&](const Reader::FDE* fde, const Reader::CIE* cie ATTRIBUTE_UNUSED) { + DCHECK(copied_cie); + DCHECK_EQ(fde->cie_pointer, 0); + if (!is_removed_symbol(fde->sym_addr)) { + debug_frame->WriteFully(fde->data(), fde->size()); + } + }); + } + debug_frame->End(); + + builder->End(); + CHECK(builder->Good()); + } + + // Produce the outer ELF file. + // It contains only the inner ELF file compressed as .gnu_debugdata section. + // This extra wrapping is not necessary but the compression saves space. 
+ if (compress) { + std::vector outer_elf_file; + std::vector gnu_debugdata; + gnu_debugdata.reserve(inner_elf_file.size() / 4); + XzCompress(ArrayRef(inner_elf_file), &gnu_debugdata); + + outer_elf_file.reserve(KB + gnu_debugdata.size()); + VectorOutputStream out("Mini-debug-info ELF file for JIT", &outer_elf_file); + std::unique_ptr> builder(new ElfBuilder(isa, &out)); + builder->Start(/*write_program_headers=*/ false); + if (max_address > min_address) { + builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address); + } + builder->WriteSection(".gnu_debugdata", &gnu_debugdata); + builder->End(); + CHECK(builder->Good()); + return outer_elf_file; + } else { + return inner_elf_file; + } +} + +std::vector WriteDebugElfFileForClasses( + InstructionSet isa, + const InstructionSetFeatures* features ATTRIBUTE_UNUSED, + const ArrayRef& types) + REQUIRES_SHARED(Locks::mutator_lock_) { + using ElfTypes = ElfRuntimeTypes; + CHECK_EQ(sizeof(ElfTypes::Addr), static_cast(GetInstructionSetPointerSize(isa))); + std::vector buffer; + buffer.reserve(KB); + VectorOutputStream out("Debug ELF file", &buffer); + std::unique_ptr> builder(new ElfBuilder(isa, &out)); + // No program headers since the ELF file is not linked and has no allocated sections. 
+ builder->Start(/* write_program_headers= */ false); + ElfDebugInfoWriter info_writer(builder.get()); + info_writer.Start(); + ElfCompilationUnitWriter cu_writer(&info_writer); + cu_writer.Write(types); + info_writer.End(); + + builder->End(); + CHECK(builder->Good()); + return buffer; +} + +// Explicit instantiations +template void WriteDebugInfo( + ElfBuilder* builder, + const DebugInfo& debug_info); +template void WriteDebugInfo( + ElfBuilder* builder, + const DebugInfo& debug_info); + +} // namespace debug +} // namespace art diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h new file mode 100644 index 0000000..1ce3c6f --- /dev/null +++ b/compiler/debug/elf_debug_writer.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ +#define ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ + +#include + +#include "arch/instruction_set_features.h" +#include "base/array_ref.h" +#include "base/macros.h" +#include "base/mutex.h" +#include "debug/debug_info.h" +#include "dwarf/dwarf_constants.h" +#include "elf/elf_builder.h" + +namespace art { +class OatHeader; +struct JITCodeEntry; +namespace mirror { +class Class; +} // namespace mirror +namespace debug { +struct MethodDebugInfo; + +template +void WriteDebugInfo( + ElfBuilder* builder, + const DebugInfo& debug_info); + +std::vector MakeMiniDebugInfo( + InstructionSet isa, + const InstructionSetFeatures* features, + uint64_t text_section_address, + size_t text_section_size, + uint64_t dex_section_address, + size_t dex_section_size, + const DebugInfo& debug_info); + +std::vector MakeElfFileForJIT( + InstructionSet isa, + const InstructionSetFeatures* features, + bool mini_debug_info, + const MethodDebugInfo& method_info); + +std::vector PackElfFileForJIT( + ArrayRef jit_entries, + ArrayRef removed_symbols, + bool compress, + /*out*/ size_t* num_symbols); + +std::vector WriteDebugElfFileForClasses( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef& types) + REQUIRES_SHARED(Locks::mutator_lock_); + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h new file mode 100644 index 0000000..2ed3a4b --- /dev/null +++ b/compiler/debug/elf_symtab_writer.h @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_ +#define ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_ + +#include +#include + +#include "base/utils.h" +#include "debug/debug_info.h" +#include "debug/method_debug_info.h" +#include "dex/dex_file-inl.h" +#include "dex/code_item_accessors.h" +#include "elf/elf_builder.h" + +namespace art { +namespace debug { + +// The ARM specification defines three special mapping symbols +// $a, $t and $d which mark ARM, Thumb and data ranges respectively. +// These symbols can be used by tools, for example, to pretty +// print instructions correctly. Objdump will use them if they +// exist, but it will still work well without them. +// However, these extra symbols take space, so let's just generate +// one symbol which marks the whole .text section as code. +// Note that ARM's Streamline requires it to match function symbol. +constexpr bool kGenerateArmMappingSymbol = true; + +// Magic name for .symtab symbols which enumerate dex files used +// by this ELF file (currently mmapped inside the .dex section). +constexpr const char* kDexFileSymbolName = "$dexfile"; + +template +static void WriteDebugSymbols(ElfBuilder* builder, + bool mini_debug_info, + const DebugInfo& debug_info) { + uint64_t mapping_symbol_address = std::numeric_limits::max(); + const auto* text = builder->GetText(); + auto* strtab = builder->GetStrTab(); + auto* symtab = builder->GetSymTab(); + + if (debug_info.Empty()) { + return; + } + + // Find all addresses which contain deduped methods. 
+ // The first instance of method is not marked deduped_, but the rest is. + std::unordered_set deduped_addresses; + for (const MethodDebugInfo& info : debug_info.compiled_methods) { + if (info.deduped) { + deduped_addresses.insert(info.code_address); + } + if (kGenerateArmMappingSymbol && info.isa == InstructionSet::kThumb2) { + uint64_t address = info.code_address; + address += info.is_code_address_text_relative ? text->GetAddress() : 0; + mapping_symbol_address = std::min(mapping_symbol_address, address); + } + } + + strtab->Start(); + strtab->Write(""); // strtab should start with empty string. + // Generate ARM mapping symbols. ELF local symbols must be added first. + if (mapping_symbol_address != std::numeric_limits::max()) { + symtab->Add(strtab->Write("$t"), text, mapping_symbol_address, 0, STB_LOCAL, STT_NOTYPE); + } + // Add symbols for compiled methods. + for (const MethodDebugInfo& info : debug_info.compiled_methods) { + if (info.deduped) { + continue; // Add symbol only for the first instance. + } + size_t name_offset; + if (!info.custom_name.empty()) { + name_offset = strtab->Write(info.custom_name); + } else { + DCHECK(info.dex_file != nullptr); + std::string name = info.dex_file->PrettyMethod(info.dex_method_index, !mini_debug_info); + if (deduped_addresses.find(info.code_address) != deduped_addresses.end()) { + name += " [DEDUPED]"; + } + name_offset = strtab->Write(name); + } + + uint64_t address = info.code_address; + address += info.is_code_address_text_relative ? text->GetAddress() : 0; + // Add in code delta, e.g., thumb bit 0 for Thumb2 code. + address += CompiledMethod::CodeDelta(info.isa); + symtab->Add(name_offset, text, address, info.code_size, STB_GLOBAL, STT_FUNC); + } + // Add symbols for dex files. 
+ if (!debug_info.dex_files.empty() && builder->GetDex()->Exists()) { + auto dex = builder->GetDex(); + for (auto it : debug_info.dex_files) { + uint64_t dex_address = dex->GetAddress() + it.first /* offset within the section */; + const DexFile* dex_file = it.second; + typename ElfTypes::Word dex_name = strtab->Write(kDexFileSymbolName); + symtab->Add(dex_name, dex, dex_address, dex_file->Size(), STB_GLOBAL, STT_FUNC); + } + } + strtab->End(); + + // Symbols are buffered and written after names (because they are smaller). + symtab->WriteCachedSection(); +} + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_ + diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h new file mode 100644 index 0000000..152db6e --- /dev/null +++ b/compiler/debug/method_debug_info.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ +#define ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ + +#include + +#include "arch/instruction_set.h" +#include "base/array_ref.h" +#include "dex/dex_file.h" + +namespace art { +namespace debug { + +struct MethodDebugInfo { + std::string custom_name; + const DexFile* dex_file; // Native methods (trampolines) do not reference dex file. 
+ size_t class_def_index; + uint32_t dex_method_index; + uint32_t access_flags; + const dex::CodeItem* code_item; + InstructionSet isa; + bool deduped; + bool is_native_debuggable; + bool is_optimized; + bool is_code_address_text_relative; // Is the address offset from start of .text section? + uint64_t code_address; + uint32_t code_size; + uint32_t frame_size_in_bytes; + const uint8_t* code_info; + ArrayRef cfi; +}; + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ diff --git a/compiler/debug/src_map_elem.h b/compiler/debug/src_map_elem.h new file mode 100644 index 0000000..5286b8c --- /dev/null +++ b/compiler/debug/src_map_elem.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEBUG_SRC_MAP_ELEM_H_ +#define ART_COMPILER_DEBUG_SRC_MAP_ELEM_H_ + +#include + +namespace art { + +class SrcMapElem { + public: + uint32_t from_; + int32_t to_; +}; + +inline bool operator<(const SrcMapElem& lhs, const SrcMapElem& rhs) { + if (lhs.from_ != rhs.from_) { + return lhs.from_ < rhs.from_; + } + return lhs.to_ < rhs.to_; +} + +inline bool operator==(const SrcMapElem& lhs, const SrcMapElem& rhs) { + return lhs.from_ == rhs.from_ && lhs.to_ == rhs.to_; +} + +} // namespace art + +#endif // ART_COMPILER_DEBUG_SRC_MAP_ELEM_H_ diff --git a/compiler/debug/src_map_elem_test.cc b/compiler/debug/src_map_elem_test.cc new file mode 100644 index 0000000..ceaa53f --- /dev/null +++ b/compiler/debug/src_map_elem_test.cc @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "src_map_elem.h" + +#include "base/macros.h" + +namespace art { +namespace debug { + +TEST(SrcMapElem, Operators) { + SrcMapElem elems[] = { + { 1u, -1 }, + { 1u, 0 }, + { 1u, 1 }, + { 2u, -1 }, + { 2u, 0 }, // Index 4. + { 2u, 1 }, + { 2u, 0u }, // Index 6: Arbitrarily add identical SrcMapElem with index 4. + }; + + for (size_t i = 0; i != arraysize(elems); ++i) { + for (size_t j = 0; j != arraysize(elems); ++j) { + bool expected = (i != 6u ? i : 4u) == (j != 6u ? 
j : 4u); + EXPECT_EQ(expected, elems[i] == elems[j]) << i << " " << j; + } + } + + for (size_t i = 0; i != arraysize(elems); ++i) { + for (size_t j = 0; j != arraysize(elems); ++j) { + bool expected = (i != 6u ? i : 4u) < (j != 6u ? j : 4u); + EXPECT_EQ(expected, elems[i] < elems[j]) << i << " " << j; + } + } +} + +} // namespace debug +} // namespace art diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc new file mode 100644 index 0000000..b0f025d --- /dev/null +++ b/compiler/dex/inline_method_analyser.cc @@ -0,0 +1,737 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "inline_method_analyser.h" + +#include "art_field-inl.h" +#include "art_method-inl.h" +#include "base/enums.h" +#include "class_linker-inl.h" +#include "dex/code_item_accessors-inl.h" +#include "dex/dex_file-inl.h" +#include "dex/dex_instruction-inl.h" +#include "dex/dex_instruction.h" +#include "dex/dex_instruction_utils.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache-inl.h" + +/* + * NOTE: This code is part of the quick compiler. It lives in the runtime + * only to allow the debugger to check whether a method has been inlined. + */ + +namespace art { + +namespace { // anonymous namespace + +// Helper class for matching a pattern. +class Matcher { + public: + // Match function type. 
+  using MatchFn = bool(Matcher*);
+
+  template <size_t size>
+  static bool Match(const CodeItemDataAccessor* code_item, MatchFn* const (&pattern)[size]);
+
+  // Match and advance.
+
+  static bool Mark(Matcher* matcher);
+
+  template <bool (Matcher::*Fn)()>
+  static bool Required(Matcher* matcher);
+
+  template <bool (Matcher::*Fn)()>
+  static bool Repeated(Matcher* matcher);  // On match, returns to the mark.
+
+  // Match an individual instruction.
+
+  template <Instruction::Code opcode> bool Opcode();
+  bool Const0();
+  bool IPutOnThis();
+
+ private:
+  explicit Matcher(const CodeItemDataAccessor* code_item)
+      : code_item_(code_item),
+        instruction_(code_item->begin()) {}
+
+  static bool DoMatch(const CodeItemDataAccessor* code_item, MatchFn* const* pattern, size_t size);
+
+  const CodeItemDataAccessor* const code_item_;
+  DexInstructionIterator instruction_;
+  size_t pos_ = 0u;
+  size_t mark_ = 0u;
+};
+
+template <size_t size>
+bool Matcher::Match(const CodeItemDataAccessor* code_item, MatchFn* const (&pattern)[size]) {
+  return DoMatch(code_item, pattern, size);
+}
+
+bool Matcher::Mark(Matcher* matcher) {
+  matcher->pos_ += 1u;  // Advance to the next match function before marking.
+  matcher->mark_ = matcher->pos_;
+  return true;
+}
+
+template <bool (Matcher::*Fn)()>
+bool Matcher::Required(Matcher* matcher) {
+  if (!(matcher->*Fn)()) {
+    return false;
+  }
+  matcher->pos_ += 1u;
+  ++matcher->instruction_;
+  return true;
+}
+
+template <bool (Matcher::*Fn)()>
+bool Matcher::Repeated(Matcher* matcher) {
+  if (!(matcher->*Fn)()) {
+    // Didn't match optional instruction, try the next match function.
+    matcher->pos_ += 1u;
+    return true;
+  }
+  matcher->pos_ = matcher->mark_;
+  ++matcher->instruction_;
+  return true;
+}
+
+template <Instruction::Code opcode>
+bool Matcher::Opcode() {
+  return instruction_->Opcode() == opcode;
+}
instruction_->VRegB_51l() == 0 + : instruction_->VRegB() == 0); +} + +bool Matcher::IPutOnThis() { + DCHECK_NE(code_item_->InsSize(), 0u); + return IsInstructionIPut(instruction_->Opcode()) && + instruction_->VRegB_22c() == code_item_->RegistersSize() - code_item_->InsSize(); +} + +bool Matcher::DoMatch(const CodeItemDataAccessor* code_item, MatchFn* const* pattern, size_t size) { + Matcher matcher(code_item); + while (matcher.pos_ != size) { + if (!pattern[matcher.pos_](&matcher)) { + return false; + } + } + return true; +} + +// Used for a single invoke in a constructor. In that situation, the method verifier makes +// sure we invoke a constructor either in the same class or superclass with at least "this". +ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_direct) + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT); + if (kIsDebugBuild) { + CodeItemDataAccessor accessor(method->DexInstructionData()); + DCHECK_EQ(invoke_direct->VRegC_35c(), + accessor.RegistersSize() - accessor.InsSize()); + } + uint32_t method_index = invoke_direct->VRegB_35c(); + ArtMethod* target_method = Runtime::Current()->GetClassLinker()->LookupResolvedMethod( + method_index, method->GetDexCache(), method->GetClassLoader()); + if (kIsDebugBuild && target_method != nullptr) { + CHECK(!target_method->IsStatic()); + CHECK(target_method->IsConstructor()); + CHECK(target_method->GetDeclaringClass() == method->GetDeclaringClass() || + target_method->GetDeclaringClass() == method->GetDeclaringClass()->GetSuperClass()); + } + return target_method; +} + +// Return the forwarded arguments and check that all remaining arguments are zero. +// If the check fails, return static_cast(-1). 
+size_t CountForwardedConstructorArguments(const CodeItemDataAccessor* code_item, + const Instruction* invoke_direct, + uint16_t zero_vreg_mask) { + DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT); + size_t number_of_args = invoke_direct->VRegA_35c(); + DCHECK_NE(number_of_args, 0u); + uint32_t args[Instruction::kMaxVarArgRegs]; + invoke_direct->GetVarArgs(args); + uint16_t this_vreg = args[0]; + DCHECK_EQ(this_vreg, code_item->RegistersSize() - code_item->InsSize()); // Checked by verifier. + size_t forwarded = 1u; + while (forwarded < number_of_args && + args[forwarded] == this_vreg + forwarded && + (zero_vreg_mask & (1u << args[forwarded])) == 0) { + ++forwarded; + } + for (size_t i = forwarded; i != number_of_args; ++i) { + if ((zero_vreg_mask & (1u << args[i])) == 0) { + return static_cast(-1); + } + } + return forwarded; +} + +uint16_t GetZeroVRegMask(const Instruction* const0) { + DCHECK(IsInstructionDirectConst(const0->Opcode())); + DCHECK((const0->Opcode() == Instruction::CONST_WIDE) ? const0->VRegB_51l() == 0u + : const0->VRegB() == 0); + uint16_t base_mask = IsInstructionConstWide(const0->Opcode()) ? 3u : 1u; + return base_mask << const0->VRegA(); +} + +// We limit the number of IPUTs storing parameters. There can be any number +// of IPUTs that store the value 0 as they are useless in a constructor as +// the object always starts zero-initialized. We also eliminate all but the +// last store to any field as they are not observable; not even if the field +// is volatile as no reference to the object can escape from a constructor +// with this pattern. 
+static constexpr size_t kMaxConstructorIPuts = 3u; + +struct ConstructorIPutData { + ConstructorIPutData() : field_index(DexFile::kDexNoIndex16), arg(0u) { } + + uint16_t field_index; + uint16_t arg; +}; + +bool RecordConstructorIPut(ArtMethod* method, + const Instruction* new_iput, + uint16_t this_vreg, + uint16_t zero_vreg_mask, + /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts]) + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(IsInstructionIPut(new_iput->Opcode())); + uint32_t field_index = new_iput->VRegC_22c(); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static= */ false); + if (UNLIKELY(field == nullptr)) { + return false; + } + // Remove previous IPUT to the same field, if any. Different field indexes may refer + // to the same field, so we need to compare resolved fields from the dex cache. + for (size_t old_pos = 0; old_pos != arraysize(iputs); ++old_pos) { + if (iputs[old_pos].field_index == DexFile::kDexNoIndex16) { + break; + } + ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index, + method, + /* is_static= */ false); + DCHECK(f != nullptr); + if (f == field) { + auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos); + *back_it = ConstructorIPutData(); + break; + } + } + // If the stored value isn't zero, record the IPUT. + if ((zero_vreg_mask & (1u << new_iput->VRegA_22c())) == 0u) { + size_t new_pos = 0; + while (new_pos != arraysize(iputs) && iputs[new_pos].field_index != DexFile::kDexNoIndex16) { + ++new_pos; + } + if (new_pos == arraysize(iputs)) { + return false; // Exceeded capacity of the output array. 
+ } + iputs[new_pos].field_index = field_index; + iputs[new_pos].arg = new_iput->VRegA_22c() - this_vreg; + } + return true; +} + +bool DoAnalyseConstructor(const CodeItemDataAccessor* code_item, + ArtMethod* method, + /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts]) + REQUIRES_SHARED(Locks::mutator_lock_) { + // On entry we should not have any IPUTs yet. + DCHECK_EQ(0, std::count_if( + iputs, + iputs + arraysize(iputs), + [](const ConstructorIPutData& iput_data) { + return iput_data.field_index != DexFile::kDexNoIndex16; + })); + + // Limit the maximum number of code units we're willing to match. + static constexpr size_t kMaxCodeUnits = 16u; + + // Limit the number of registers that the constructor may use to 16. + // Given that IPUTs must use low 16 registers and we do not match MOVEs, + // this is a reasonable limitation. + static constexpr size_t kMaxVRegs = 16u; + + // We try to match a constructor that calls another constructor (either in + // superclass or in the same class) with the same parameters, or with some + // parameters truncated (allowed only for calls to superclass constructor) + // or with extra parameters with value 0 (with any type, including null). + // This call can be followed by optional IPUTs on "this" storing either one + // of the parameters or 0 and the code must then finish with RETURN_VOID. + // The called constructor must be either java.lang.Object.() or it + // must also match the same pattern. 
+  static Matcher::MatchFn* const kConstructorPattern[] = {
+      &Matcher::Mark,
+      &Matcher::Repeated<&Matcher::Const0>,
+      &Matcher::Required<&Matcher::Opcode<Instruction::INVOKE_DIRECT>>,
+      &Matcher::Mark,
+      &Matcher::Repeated<&Matcher::Const0>,
+      &Matcher::Repeated<&Matcher::IPutOnThis>,
+      &Matcher::Required<&Matcher::Opcode<Instruction::RETURN_VOID>>,
+  };
+
+  DCHECK(method != nullptr);
+  DCHECK(!method->IsStatic());
+  DCHECK(method->IsConstructor());
+  DCHECK(code_item != nullptr);
+  if (!method->GetDeclaringClass()->IsVerified() ||
+      code_item->InsnsSizeInCodeUnits() > kMaxCodeUnits ||
+      code_item->RegistersSize() > kMaxVRegs ||
+      !Matcher::Match(code_item, kConstructorPattern)) {
+    return false;
+  }
+
+  // Verify the invoke, prevent a few odd cases and collect IPUTs.
+  uint16_t this_vreg = code_item->RegistersSize() - code_item->InsSize();
+  uint16_t zero_vreg_mask = 0u;
+
+  for (const DexInstructionPcPair& pair : *code_item) {
+    const Instruction& instruction = pair.Inst();
+    if (instruction.Opcode() == Instruction::RETURN_VOID) {
+      break;
+    } else if (instruction.Opcode() == Instruction::INVOKE_DIRECT) {
+      ArtMethod* target_method = GetTargetConstructor(method, &instruction);
+      if (target_method == nullptr) {
+        return false;
+      }
+      // We allow forwarding constructors only if they pass more arguments
+      // to prevent infinite recursion.
+      if (target_method->GetDeclaringClass() == method->GetDeclaringClass() &&
+          instruction.VRegA_35c() <= code_item->InsSize()) {
+        return false;
+      }
+      size_t forwarded = CountForwardedConstructorArguments(code_item, &instruction, zero_vreg_mask);
+      if (forwarded == static_cast<size_t>(-1)) {
+        return false;
+      }
+ } + if (!DoAnalyseConstructor(&target_code_item, target_method, iputs)) { + return false; + } + // Prune IPUTs with zero input. + auto kept_end = std::remove_if( + iputs, + iputs + arraysize(iputs), + [forwarded](const ConstructorIPutData& iput_data) { + return iput_data.arg >= forwarded; + }); + std::fill(kept_end, iputs + arraysize(iputs), ConstructorIPutData()); + // If we have any IPUTs from the call, check that the target method is in the same + // dex file (compare DexCache references), otherwise field_indexes would be bogus. + if (iputs[0].field_index != DexFile::kDexNoIndex16 && + target_method->GetDexCache() != method->GetDexCache()) { + return false; + } + } + } else if (IsInstructionDirectConst(instruction.Opcode())) { + zero_vreg_mask |= GetZeroVRegMask(&instruction); + if ((zero_vreg_mask & (1u << this_vreg)) != 0u) { + return false; // Overwriting `this` is unsupported. + } + } else { + DCHECK(IsInstructionIPut(instruction.Opcode())); + DCHECK_EQ(instruction.VRegB_22c(), this_vreg); + if (!RecordConstructorIPut(method, &instruction, this_vreg, zero_vreg_mask, iputs)) { + return false; + } + } + } + return true; +} + +} // anonymous namespace + +bool AnalyseConstructor(const CodeItemDataAccessor* code_item, + ArtMethod* method, + InlineMethod* result) + REQUIRES_SHARED(Locks::mutator_lock_) { + ConstructorIPutData iputs[kMaxConstructorIPuts]; + if (!DoAnalyseConstructor(code_item, method, iputs)) { + return false; + } + static_assert(kMaxConstructorIPuts == 3, "Unexpected limit"); // Code below depends on this. 
+ DCHECK(iputs[0].field_index != DexFile::kDexNoIndex16 || + iputs[1].field_index == DexFile::kDexNoIndex16); + DCHECK(iputs[1].field_index != DexFile::kDexNoIndex16 || + iputs[2].field_index == DexFile::kDexNoIndex16); + +#define STORE_IPUT(n) \ + do { \ + result->d.constructor_data.iput##n##_field_index = iputs[n].field_index; \ + result->d.constructor_data.iput##n##_arg = iputs[n].arg; \ + } while (false) + + STORE_IPUT(0); + STORE_IPUT(1); + STORE_IPUT(2); +#undef STORE_IPUT + + result->opcode = kInlineOpConstructor; + result->d.constructor_data.reserved = 0u; + return true; +} + +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT), + "iget_object type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN), + "iget_boolean type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE), "iget_byte type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR), "iget_char type"); +static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT), "iget_short type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT), "iput type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE), "iput_wide type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT), + "iput_object type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN), + "iput_boolean type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE), "iput_byte type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR), "iput_char type"); +static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT), "iput_short type"); 
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT), "iget/iput variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), "iget/iput_wide variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), "iget/iput_object variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), "iget/iput_boolean variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), "iget/iput_byte variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), "iget/iput_char variant"); +static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) == + InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant"); + +bool InlineMethodAnalyser::AnalyseMethodCode(ArtMethod* method, InlineMethod* result) { + CodeItemDataAccessor code_item(method->DexInstructionData()); + if (!code_item.HasCodeItem()) { + // Native or abstract. + return false; + } + return AnalyseMethodCode(&code_item, + MethodReference(method->GetDexFile(), method->GetDexMethodIndex()), + method->IsStatic(), + method, + result); +} + +bool InlineMethodAnalyser::AnalyseMethodCode(const CodeItemDataAccessor* code_item, + const MethodReference& method_ref, + bool is_static, + ArtMethod* method, + InlineMethod* result) { + // We currently support only plain return or 2-instruction methods. 
+ + DCHECK_NE(code_item->InsnsSizeInCodeUnits(), 0u); + Instruction::Code opcode = code_item->begin()->Opcode(); + + switch (opcode) { + case Instruction::RETURN_VOID: + if (result != nullptr) { + result->opcode = kInlineOpNop; + result->d.data = 0u; + } + return true; + case Instruction::RETURN: + case Instruction::RETURN_OBJECT: + case Instruction::RETURN_WIDE: + return AnalyseReturnMethod(code_item, result); + case Instruction::CONST: + case Instruction::CONST_4: + case Instruction::CONST_16: + case Instruction::CONST_HIGH16: + // TODO: Support wide constants (RETURN_WIDE). + if (AnalyseConstMethod(code_item, result)) { + return true; + } + FALLTHROUGH_INTENDED; + case Instruction::CONST_WIDE: + case Instruction::CONST_WIDE_16: + case Instruction::CONST_WIDE_32: + case Instruction::CONST_WIDE_HIGH16: + case Instruction::INVOKE_DIRECT: + if (method != nullptr && !method->IsStatic() && method->IsConstructor()) { + return AnalyseConstructor(code_item, method, result); + } + return false; + case Instruction::IGET: + case Instruction::IGET_OBJECT: + case Instruction::IGET_BOOLEAN: + case Instruction::IGET_BYTE: + case Instruction::IGET_CHAR: + case Instruction::IGET_SHORT: + case Instruction::IGET_WIDE: + // TODO: Add handling for JIT. + // case Instruction::IGET_QUICK: + // case Instruction::IGET_WIDE_QUICK: + // case Instruction::IGET_OBJECT_QUICK: + return AnalyseIGetMethod(code_item, method_ref, is_static, method, result); + case Instruction::IPUT: + case Instruction::IPUT_OBJECT: + case Instruction::IPUT_BOOLEAN: + case Instruction::IPUT_BYTE: + case Instruction::IPUT_CHAR: + case Instruction::IPUT_SHORT: + case Instruction::IPUT_WIDE: + // TODO: Add handling for JIT. 
+ // case Instruction::IPUT_QUICK: + // case Instruction::IPUT_WIDE_QUICK: + // case Instruction::IPUT_OBJECT_QUICK: + return AnalyseIPutMethod(code_item, method_ref, is_static, method, result); + default: + return false; + } +} + +bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) { + const dex::MethodId& method_id = ref.dex_file->GetMethodId(ref.index); + const char* method_name = ref.dex_file->GetMethodName(method_id); + // javac names synthetic accessors "access$nnn", + // jack names them "-getN", "-putN", "-wrapN". + return strncmp(method_name, "access$", strlen("access$")) == 0 || + strncmp(method_name, "-", strlen("-")) == 0; +} + +bool InlineMethodAnalyser::AnalyseReturnMethod(const CodeItemDataAccessor* code_item, + InlineMethod* result) { + DexInstructionIterator return_instruction = code_item->begin(); + Instruction::Code return_opcode = return_instruction->Opcode(); + uint32_t reg = return_instruction->VRegA_11x(); + uint32_t arg_start = code_item->RegistersSize() - code_item->InsSize(); + DCHECK_GE(reg, arg_start); + DCHECK_LT((return_opcode == Instruction::RETURN_WIDE) ? reg + 1 : reg, + code_item->RegistersSize()); + + if (result != nullptr) { + result->opcode = kInlineOpReturnArg; + InlineReturnArgData* data = &result->d.return_data; + data->arg = reg - arg_start; + data->is_wide = (return_opcode == Instruction::RETURN_WIDE) ? 1u : 0u; + data->is_object = (return_opcode == Instruction::RETURN_OBJECT) ? 
1u : 0u; + data->reserved = 0u; + data->reserved2 = 0u; + } + return true; +} + +bool InlineMethodAnalyser::AnalyseConstMethod(const CodeItemDataAccessor* code_item, + InlineMethod* result) { + DexInstructionIterator instruction = code_item->begin(); + const Instruction* return_instruction = instruction->Next(); + Instruction::Code return_opcode = return_instruction->Opcode(); + if (return_opcode != Instruction::RETURN && + return_opcode != Instruction::RETURN_OBJECT) { + return false; + } + + int32_t return_reg = return_instruction->VRegA_11x(); + DCHECK_LT(return_reg, code_item->RegistersSize()); + + int32_t const_value = instruction->VRegB(); + if (instruction->Opcode() == Instruction::CONST_HIGH16) { + const_value <<= 16; + } + DCHECK_LT(instruction->VRegA(), code_item->RegistersSize()); + if (instruction->VRegA() != return_reg) { + return false; // Not returning the value set by const? + } + if (return_opcode == Instruction::RETURN_OBJECT && const_value != 0) { + return false; // Returning non-null reference constant? 
+ } + if (result != nullptr) { + result->opcode = kInlineOpNonWideConst; + result->d.data = static_cast(const_value); + } + return true; +} + +bool InlineMethodAnalyser::AnalyseIGetMethod(const CodeItemDataAccessor* code_item, + const MethodReference& method_ref, + bool is_static, + ArtMethod* method, + InlineMethod* result) { + DexInstructionIterator instruction = code_item->begin(); + Instruction::Code opcode = instruction->Opcode(); + DCHECK(IsInstructionIGet(opcode)); + + const Instruction* return_instruction = instruction->Next(); + Instruction::Code return_opcode = return_instruction->Opcode(); + if (!(return_opcode == Instruction::RETURN_WIDE && opcode == Instruction::IGET_WIDE) && + !(return_opcode == Instruction::RETURN_OBJECT && opcode == Instruction::IGET_OBJECT) && + !(return_opcode == Instruction::RETURN && opcode != Instruction::IGET_WIDE && + opcode != Instruction::IGET_OBJECT)) { + return false; + } + + uint32_t return_reg = return_instruction->VRegA_11x(); + DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1 : return_reg, + code_item->RegistersSize()); + + uint32_t dst_reg = instruction->VRegA_22c(); + uint32_t object_reg = instruction->VRegB_22c(); + uint32_t field_idx = instruction->VRegC_22c(); + uint32_t arg_start = code_item->RegistersSize() - code_item->InsSize(); + DCHECK_GE(object_reg, arg_start); + DCHECK_LT(object_reg, code_item->RegistersSize()); + uint32_t object_arg = object_reg - arg_start; + + DCHECK_LT(opcode == Instruction::IGET_WIDE ? dst_reg + 1 : dst_reg, code_item->RegistersSize()); + if (dst_reg != return_reg) { + return false; // Not returning the value retrieved by IGET? + } + + if (is_static || object_arg != 0u) { + // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE). + // Allow synthetic accessors. We don't care about losing their stack frame in NPE. 
+ if (!IsSyntheticAccessor(method_ref)) { + return false; + } + } + + // InlineIGetIPutData::object_arg is only 4 bits wide. + static constexpr uint16_t kMaxObjectArg = 15u; + if (object_arg > kMaxObjectArg) { + return false; + } + + if (result != nullptr) { + InlineIGetIPutData* data = &result->d.ifield_data; + if (!ComputeSpecialAccessorInfo(method, field_idx, false, data)) { + return false; + } + result->opcode = kInlineOpIGet; + data->op_variant = IGetVariant(opcode); + data->method_is_static = is_static ? 1u : 0u; + data->object_arg = object_arg; // Allow IGET on any register, not just "this". + data->src_arg = 0u; + data->return_arg_plus1 = 0u; + } + return true; +} + +bool InlineMethodAnalyser::AnalyseIPutMethod(const CodeItemDataAccessor* code_item, + const MethodReference& method_ref, + bool is_static, + ArtMethod* method, + InlineMethod* result) { + DexInstructionIterator instruction = code_item->begin(); + Instruction::Code opcode = instruction->Opcode(); + DCHECK(IsInstructionIPut(opcode)); + + const Instruction* return_instruction = instruction->Next(); + Instruction::Code return_opcode = return_instruction->Opcode(); + uint32_t arg_start = code_item->RegistersSize() - code_item->InsSize(); + uint16_t return_arg_plus1 = 0u; + if (return_opcode != Instruction::RETURN_VOID) { + if (return_opcode != Instruction::RETURN && + return_opcode != Instruction::RETURN_OBJECT && + return_opcode != Instruction::RETURN_WIDE) { + return false; + } + // Returning an argument. + uint32_t return_reg = return_instruction->VRegA_11x(); + DCHECK_GE(return_reg, arg_start); + DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? 
return_reg + 1u : return_reg, + code_item->RegistersSize()); + return_arg_plus1 = return_reg - arg_start + 1u; + } + + uint32_t src_reg = instruction->VRegA_22c(); + uint32_t object_reg = instruction->VRegB_22c(); + uint32_t field_idx = instruction->VRegC_22c(); + DCHECK_GE(object_reg, arg_start); + DCHECK_LT(object_reg, code_item->RegistersSize()); + DCHECK_GE(src_reg, arg_start); + DCHECK_LT(opcode == Instruction::IPUT_WIDE ? src_reg + 1 : src_reg, code_item->RegistersSize()); + uint32_t object_arg = object_reg - arg_start; + uint32_t src_arg = src_reg - arg_start; + + if (is_static || object_arg != 0u) { + // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE). + // Allow synthetic accessors. We don't care about losing their stack frame in NPE. + if (!IsSyntheticAccessor(method_ref)) { + return false; + } + } + + // InlineIGetIPutData::object_arg/src_arg/return_arg_plus1 are each only 4 bits wide. + static constexpr uint16_t kMaxObjectArg = 15u; + static constexpr uint16_t kMaxSrcArg = 15u; + static constexpr uint16_t kMaxReturnArgPlus1 = 15u; + if (object_arg > kMaxObjectArg || src_arg > kMaxSrcArg || return_arg_plus1 > kMaxReturnArgPlus1) { + return false; + } + + if (result != nullptr) { + InlineIGetIPutData* data = &result->d.ifield_data; + if (!ComputeSpecialAccessorInfo(method, field_idx, true, data)) { + return false; + } + result->opcode = kInlineOpIPut; + data->op_variant = IPutVariant(opcode); + data->method_is_static = is_static ? 1u : 0u; + data->object_arg = object_arg; // Allow IPUT on any register, not just "this". 
+ data->src_arg = src_arg; + data->return_arg_plus1 = return_arg_plus1; + } + return true; +} + +bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(ArtMethod* method, + uint32_t field_idx, + bool is_put, + InlineIGetIPutData* result) { + if (method == nullptr) { + return false; + } + ObjPtr dex_cache = method->GetDexCache(); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static= */ false); + if (field == nullptr || field->IsStatic()) { + return false; + } + ObjPtr method_class = method->GetDeclaringClass(); + ObjPtr field_class = field->GetDeclaringClass(); + if (!method_class->CanAccessResolvedField(field_class, field, dex_cache, field_idx) || + (is_put && field->IsFinal() && method_class != field_class)) { + return false; + } + DCHECK_GE(field->GetOffset().Int32Value(), 0); + // Historical note: We made sure not to interleave function calls with bit field writes to + // placate Valgrind. Bug: 27552451. + uint32_t field_offset = field->GetOffset().Uint32Value(); + bool is_volatile = field->IsVolatile(); + result->field_idx = field_idx; + result->field_offset = field_offset; + result->is_volatile = is_volatile ? 1u : 0u; + return true; +} + +} // namespace art diff --git a/compiler/dex/inline_method_analyser.h b/compiler/dex/inline_method_analyser.h new file mode 100644 index 0000000..e1d652a --- /dev/null +++ b/compiler/dex/inline_method_analyser.h @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEX_INLINE_METHOD_ANALYSER_H_ +#define ART_COMPILER_DEX_INLINE_METHOD_ANALYSER_H_ + +#include "base/macros.h" +#include "base/mutex.h" +#include "dex/dex_file.h" +#include "dex/dex_instruction.h" +#include "dex/method_reference.h" + +/* + * NOTE: This code is part of the quick compiler. It lives in the runtime + * only to allow the debugger to check whether a method has been inlined. + */ + +namespace art { + +class CodeItemDataAccessor; + +namespace verifier { +class MethodVerifier; +} // namespace verifier +class ArtMethod; + +enum InlineMethodOpcode : uint16_t { + kInlineOpNop, + kInlineOpReturnArg, + kInlineOpNonWideConst, + kInlineOpIGet, + kInlineOpIPut, + kInlineOpConstructor, +}; + +struct InlineIGetIPutData { + // The op_variant below is DexMemAccessType but the runtime doesn't know that enumeration. + uint16_t op_variant : 3; + uint16_t method_is_static : 1; + uint16_t object_arg : 4; + uint16_t src_arg : 4; // iput only + uint16_t return_arg_plus1 : 4; // iput only, method argument to return + 1, 0 = return void. 
+ uint16_t field_idx; + uint32_t is_volatile : 1; + uint32_t field_offset : 31; +}; +static_assert(sizeof(InlineIGetIPutData) == sizeof(uint64_t), "Invalid size of InlineIGetIPutData"); + +struct InlineReturnArgData { + uint16_t arg; + uint16_t is_wide : 1; + uint16_t is_object : 1; + uint16_t reserved : 14; + uint32_t reserved2; +}; +static_assert(sizeof(InlineReturnArgData) == sizeof(uint64_t), + "Invalid size of InlineReturnArgData"); + +struct InlineConstructorData { + // There can be up to 3 IPUTs, unused fields are marked with kNoDexIndex16. + uint16_t iput0_field_index; + uint16_t iput1_field_index; + uint16_t iput2_field_index; + uint16_t iput0_arg : 4; + uint16_t iput1_arg : 4; + uint16_t iput2_arg : 4; + uint16_t reserved : 4; +}; +static_assert(sizeof(InlineConstructorData) == sizeof(uint64_t), + "Invalid size of InlineConstructorData"); + +struct InlineMethod { + InlineMethodOpcode opcode; + union { + uint64_t data; + InlineIGetIPutData ifield_data; + InlineReturnArgData return_data; + InlineConstructorData constructor_data; + } d; +}; + +class InlineMethodAnalyser { + public: + /** + * Analyse method code to determine if the method is a candidate for inlining. + * If it is, record the inlining data. + * + * @return true if the method is a candidate for inlining, false otherwise. 
+ */ + static bool AnalyseMethodCode(ArtMethod* method, InlineMethod* result) + REQUIRES_SHARED(Locks::mutator_lock_); + + static constexpr bool IsInstructionIGet(Instruction::Code opcode) { + return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT; + } + + static constexpr bool IsInstructionIPut(Instruction::Code opcode) { + return Instruction::IPUT <= opcode && opcode <= Instruction::IPUT_SHORT; + } + + static constexpr uint16_t IGetVariant(Instruction::Code opcode) { + return opcode - Instruction::IGET; + } + + static constexpr uint16_t IPutVariant(Instruction::Code opcode) { + return opcode - Instruction::IPUT; + } + + // Determines whether the method is a synthetic accessor (method name starts with "access$"). + static bool IsSyntheticAccessor(MethodReference ref); + + private: + static bool AnalyseMethodCode(const CodeItemDataAccessor* code_item, + const MethodReference& method_ref, + bool is_static, + ArtMethod* method, + InlineMethod* result) + REQUIRES_SHARED(Locks::mutator_lock_); + static bool AnalyseReturnMethod(const CodeItemDataAccessor* code_item, InlineMethod* result); + static bool AnalyseConstMethod(const CodeItemDataAccessor* code_item, InlineMethod* result); + static bool AnalyseIGetMethod(const CodeItemDataAccessor* code_item, + const MethodReference& method_ref, + bool is_static, + ArtMethod* method, + InlineMethod* result) + REQUIRES_SHARED(Locks::mutator_lock_); + static bool AnalyseIPutMethod(const CodeItemDataAccessor* code_item, + const MethodReference& method_ref, + bool is_static, + ArtMethod* method, + InlineMethod* result) + REQUIRES_SHARED(Locks::mutator_lock_); + + // Can we fast path instance field access in a verified accessor? + // If yes, computes field's offset and volatility and whether the method is static or not. 
+ static bool ComputeSpecialAccessorInfo(ArtMethod* method, + uint32_t field_idx, + bool is_put, + InlineIGetIPutData* result) + REQUIRES_SHARED(Locks::mutator_lock_); +}; + +} // namespace art + +#endif // ART_COMPILER_DEX_INLINE_METHOD_ANALYSER_H_ diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc new file mode 100644 index 0000000..e7a3817 --- /dev/null +++ b/compiler/dex/verification_results.cc @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "verification_results.h" + +#include + +#include "base/mutex-inl.h" +#include "base/stl_util.h" +#include "driver/compiler_options.h" +#include "runtime.h" +#include "thread-current-inl.h" +#include "thread.h" +#include "utils/atomic_dex_ref_map-inl.h" +#include "verified_method.h" +#include "verifier/method_verifier-inl.h" + +namespace art { + +VerificationResults::VerificationResults(const CompilerOptions* compiler_options) + : compiler_options_(compiler_options), + verified_methods_lock_("compiler verified methods lock"), + rejected_classes_lock_("compiler rejected classes lock") {} + +VerificationResults::~VerificationResults() { + WriterMutexLock mu(Thread::Current(), verified_methods_lock_); + STLDeleteValues(&verified_methods_); + atomic_verified_methods_.Visit([](const DexFileReference& ref ATTRIBUTE_UNUSED, + const VerifiedMethod* method) { + delete method; + }); +} + +void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) { + DCHECK(method_verifier != nullptr); + MethodReference ref = method_verifier->GetMethodReference(); + std::unique_ptr verified_method(VerifiedMethod::Create(method_verifier)); + if (verified_method == nullptr) { + // We'll punt this later. + return; + } + AtomicMap::InsertResult result = atomic_verified_methods_.Insert(ref, + /*expected*/ nullptr, + verified_method.get()); + const VerifiedMethod* existing = nullptr; + bool inserted; + if (result != AtomicMap::kInsertResultInvalidDexFile) { + inserted = (result == AtomicMap::kInsertResultSuccess); + if (!inserted) { + // Rare case. 
+ CHECK(atomic_verified_methods_.Get(ref, &existing)); + CHECK_NE(verified_method.get(), existing); + } + } else { + WriterMutexLock mu(Thread::Current(), verified_methods_lock_); + auto it = verified_methods_.find(ref); + inserted = it == verified_methods_.end(); + if (inserted) { + verified_methods_.Put(ref, verified_method.get()); + DCHECK(verified_methods_.find(ref) != verified_methods_.end()); + } else { + existing = it->second; + } + } + if (inserted) { + // Successfully added, release the unique_ptr since we no longer have ownership. + DCHECK_EQ(GetVerifiedMethod(ref), verified_method.get()); + verified_method.release(); // NOLINT b/117926937 + } else { + // TODO: Investigate why are we doing the work again for this method and try to avoid it. + LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod(); + if (!Runtime::Current()->UseJitCompilation()) { + if (kIsDebugBuild) { + auto ex_set = existing->GetSafeCastSet(); + auto ve_set = verified_method->GetSafeCastSet(); + CHECK_EQ(ex_set == nullptr, ve_set == nullptr); + CHECK((ex_set == nullptr) || (ex_set->size() == ve_set->size())); + } + } + // Let the unique_ptr delete the new verified method since there was already an existing one + // registered. It is unsafe to replace the existing one since the JIT may be using it to + // generate a native GC map. + } +} + +const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) const { + const VerifiedMethod* ret = nullptr; + if (atomic_verified_methods_.Get(ref, &ret)) { + return ret; + } + ReaderMutexLock mu(Thread::Current(), verified_methods_lock_); + auto it = verified_methods_.find(ref); + return (it != verified_methods_.end()) ? it->second : nullptr; +} + +void VerificationResults::CreateVerifiedMethodFor(MethodReference ref) { + // This method should only be called for classes verified at compile time, + // which have no verifier error, nor has methods that we know will throw + // at runtime. 
+ std::unique_ptr verified_method = std::make_unique( + /* encountered_error_types= */ 0, /* has_runtime_throw= */ false); + if (atomic_verified_methods_.Insert(ref, + /*expected*/ nullptr, + verified_method.get()) == + AtomicMap::InsertResult::kInsertResultSuccess) { + verified_method.release(); // NOLINT b/117926937 + } +} + +void VerificationResults::AddRejectedClass(ClassReference ref) { + { + WriterMutexLock mu(Thread::Current(), rejected_classes_lock_); + rejected_classes_.insert(ref); + } + DCHECK(IsClassRejected(ref)); +} + +bool VerificationResults::IsClassRejected(ClassReference ref) const { + ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_); + return (rejected_classes_.find(ref) != rejected_classes_.end()); +} + +bool VerificationResults::IsCandidateForCompilation(MethodReference&, + const uint32_t access_flags) const { + if (!compiler_options_->IsAotCompilationEnabled()) { + return false; + } + // Don't compile class initializers unless kEverything. + if ((compiler_options_->GetCompilerFilter() != CompilerFilter::kEverything) && + ((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { + return false; + } + return true; +} + +void VerificationResults::AddDexFile(const DexFile* dex_file) { + atomic_verified_methods_.AddDexFile(dex_file); + WriterMutexLock mu(Thread::Current(), verified_methods_lock_); + // There can be some verified methods that are already registered for the dex_file since we set + // up well known classes earlier. Remove these and put them in the array so that we don't + // accidentally miss seeing them. 
+ for (auto it = verified_methods_.begin(); it != verified_methods_.end(); ) { + MethodReference ref = it->first; + if (ref.dex_file == dex_file) { + CHECK(atomic_verified_methods_.Insert(ref, nullptr, it->second) == + AtomicMap::kInsertResultSuccess); + it = verified_methods_.erase(it); + } else { + ++it; + } + } +} + +} // namespace art diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h new file mode 100644 index 0000000..04c4fa6 --- /dev/null +++ b/compiler/dex/verification_results.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEX_VERIFICATION_RESULTS_H_ +#define ART_COMPILER_DEX_VERIFICATION_RESULTS_H_ + +#include +#include + +#include "base/dchecked_vector.h" +#include "base/macros.h" +#include "base/mutex.h" +#include "base/safe_map.h" +#include "dex/class_reference.h" +#include "dex/method_reference.h" +#include "utils/atomic_dex_ref_map.h" + +namespace art { + +namespace verifier { +class MethodVerifier; +class VerifierDepsTest; +} // namespace verifier + +class CompilerOptions; +class VerifiedMethod; + +// Used by CompilerCallbacks to track verification information from the Runtime. 
+class VerificationResults { + public: + explicit VerificationResults(const CompilerOptions* compiler_options); + ~VerificationResults(); + + void ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!verified_methods_lock_); + + void CreateVerifiedMethodFor(MethodReference ref) + REQUIRES(!verified_methods_lock_); + + const VerifiedMethod* GetVerifiedMethod(MethodReference ref) const + REQUIRES(!verified_methods_lock_); + + void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_); + bool IsClassRejected(ClassReference ref) const REQUIRES(!rejected_classes_lock_); + + bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags) const; + + // Add a dex file to enable using the atomic map. + void AddDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_); + + private: + // Verified methods. The method array is fixed to avoid needing a lock to extend it. + using AtomicMap = AtomicDexRefMap; + using VerifiedMethodMap = SafeMap; + + VerifiedMethodMap verified_methods_ GUARDED_BY(verified_methods_lock_); + const CompilerOptions* const compiler_options_; + + // Dex2oat can add dex files to atomic_verified_methods_ to avoid locking when calling + // GetVerifiedMethod. + AtomicMap atomic_verified_methods_; + + // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation. + mutable ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + + // Rejected classes. + // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation. 
+ mutable ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + std::set rejected_classes_ GUARDED_BY(rejected_classes_lock_); + + friend class verifier::VerifierDepsTest; +}; + +} // namespace art + +#endif // ART_COMPILER_DEX_VERIFICATION_RESULTS_H_ diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc new file mode 100644 index 0000000..172ec6b --- /dev/null +++ b/compiler/dex/verified_method.cc @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "verified_method.h" + +#include +#include + +#include + +#include "dex/code_item_accessors-inl.h" +#include "dex/dex_file.h" +#include "dex/dex_instruction-inl.h" +#include "runtime.h" +#include "verifier/method_verifier-inl.h" +#include "verifier/reg_type-inl.h" +#include "verifier/register_line-inl.h" +#include "verifier/verifier_deps.h" + +namespace art { + +VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw) + : encountered_error_types_(encountered_error_types), + has_runtime_throw_(has_runtime_throw) { +} + +const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier) { + DCHECK(Runtime::Current()->IsAotCompiler()); + std::unique_ptr verified_method( + new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(), + method_verifier->HasInstructionThatWillThrow())); + + if (method_verifier->HasCheckCasts()) { + verified_method->GenerateSafeCastSet(method_verifier); + } + + return verified_method.release(); +} + +bool VerifiedMethod::IsSafeCast(uint32_t pc) const { + if (safe_cast_set_ == nullptr) { + return false; + } + return std::binary_search(safe_cast_set_->begin(), safe_cast_set_->end(), pc); +} + +void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) { + /* + * Walks over the method code and adds any cast instructions in which + * the type cast is implicit to a set, which is used in the code generation + * to elide these casts. + */ + if (method_verifier->HasFailures()) { + return; + } + for (const DexInstructionPcPair& pair : method_verifier->CodeItem()) { + const Instruction& inst = pair.Inst(); + const Instruction::Code code = inst.Opcode(); + if (code == Instruction::CHECK_CAST) { + const uint32_t dex_pc = pair.DexPc(); + if (!method_verifier->GetInstructionFlags(dex_pc).IsVisited()) { + // Do not attempt to quicken this instruction, it's unreachable anyway. 
+ continue; + } + const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); + DCHECK(line != nullptr) << "Did not have line for dex pc 0x" << std::hex << dex_pc; + const verifier::RegType& reg_type(line->GetRegisterType(method_verifier, + inst.VRegA_21c())); + const verifier::RegType& cast_type = + method_verifier->ResolveCheckedClass(dex::TypeIndex(inst.VRegB_21c())); + // Pass null for the method verifier to not record the VerifierDeps dependency + // if the types are not assignable. + if (cast_type.IsStrictlyAssignableFrom(reg_type, /* verifier= */ nullptr)) { + // The types are assignable, we record that dependency in the VerifierDeps so + // that if this changes after OTA, we will re-verify again. + // We check if reg_type has a class, as the verifier may have inferred it's + // 'null'. + if (reg_type.HasClass()) { + DCHECK(cast_type.HasClass()); + verifier::VerifierDeps::MaybeRecordAssignability(method_verifier->GetDexFile(), + cast_type.GetClass(), + reg_type.GetClass(), + /* is_strict= */ true, + /* is_assignable= */ true); + } + if (safe_cast_set_ == nullptr) { + safe_cast_set_.reset(new SafeCastSet()); + } + // Verify ordering for push_back() to the sorted vector. + DCHECK(safe_cast_set_->empty() || safe_cast_set_->back() < dex_pc); + safe_cast_set_->push_back(dex_pc); + } + } + } + DCHECK(safe_cast_set_ == nullptr || !safe_cast_set_->empty()); +} + +} // namespace art diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h new file mode 100644 index 0000000..f04392d --- /dev/null +++ b/compiler/dex/verified_method.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEX_VERIFIED_METHOD_H_ +#define ART_COMPILER_DEX_VERIFIED_METHOD_H_ + +#include + +#include "base/mutex.h" +#include "base/safe_map.h" +#include "dex/dex_file.h" +#include "dex/method_reference.h" + +namespace art { + +namespace verifier { +class MethodVerifier; +} // namespace verifier + +class VerifiedMethod { + public: + VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw); + + // Cast elision set type. + // Since we're adding the dex PCs to the set in increasing order, a sorted vector + // is better for performance (not just memory usage), especially for large sets. + typedef std::vector SafeCastSet; + + static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier) + REQUIRES_SHARED(Locks::mutator_lock_); + ~VerifiedMethod() = default; + + const SafeCastSet* GetSafeCastSet() const { + return safe_cast_set_.get(); + } + + // Returns true if the cast can statically be verified to be redundant + // by using the check-cast elision peephole optimization in the verifier. + bool IsSafeCast(uint32_t pc) const; + + // Returns true if there were any errors during verification. + bool HasVerificationFailures() const { + return encountered_error_types_ != 0; + } + + uint32_t GetEncounteredVerificationFailures() const { + return encountered_error_types_; + } + + bool HasRuntimeThrow() const { + return has_runtime_throw_; + } + + private: + // Generate safe case set into safe_cast_set_. 
+ void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) + REQUIRES_SHARED(Locks::mutator_lock_); + + std::unique_ptr safe_cast_set_; + + const uint32_t encountered_error_types_; + const bool has_runtime_throw_; +}; + +} // namespace art + +#endif // ART_COMPILER_DEX_VERIFIED_METHOD_H_ diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc new file mode 100644 index 0000000..03c906b --- /dev/null +++ b/compiler/driver/compiled_method_storage.cc @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "compiled_method_storage.h" + +#include + +#include "base/data_hash.h" +#include "base/utils.h" +#include "compiled_method.h" +#include "linker/linker_patch.h" +#include "thread-current-inl.h" +#include "utils/dedupe_set-inl.h" +#include "utils/swap_space.h" + +namespace art { + +namespace { // anonymous namespace + +template +const LengthPrefixedArray* CopyArray(SwapSpace* swap_space, const ArrayRef& array) { + DCHECK(!array.empty()); + SwapAllocator allocator(swap_space); + void* storage = allocator.allocate(LengthPrefixedArray::ComputeSize(array.size())); + LengthPrefixedArray* array_copy = new(storage) LengthPrefixedArray(array.size()); + std::copy(array.begin(), array.end(), array_copy->begin()); + return array_copy; +} + +template +void ReleaseArray(SwapSpace* swap_space, const LengthPrefixedArray* array) { + SwapAllocator allocator(swap_space); + size_t size = LengthPrefixedArray::ComputeSize(array->size()); + array->~LengthPrefixedArray(); + allocator.deallocate(const_cast(reinterpret_cast(array)), size); +} + +} // anonymous namespace + +template +inline const LengthPrefixedArray* CompiledMethodStorage::AllocateOrDeduplicateArray( + const ArrayRef& data, + DedupeSetType* dedupe_set) { + if (data.empty()) { + return nullptr; + } else if (!DedupeEnabled()) { + return CopyArray(swap_space_.get(), data); + } else { + return dedupe_set->Add(Thread::Current(), data); + } +} + +template +inline void CompiledMethodStorage::ReleaseArrayIfNotDeduplicated( + const LengthPrefixedArray* array) { + if (array != nullptr && !DedupeEnabled()) { + ReleaseArray(swap_space_.get(), array); + } +} + +template +class CompiledMethodStorage::DedupeHashFunc { + private: + static constexpr bool kUseMurmur3Hash = true; + + public: + size_t operator()(const ArrayRef& array) const { + return DataHash()(array); + } +}; + +template +class CompiledMethodStorage::LengthPrefixedArrayAlloc { + public: + explicit LengthPrefixedArrayAlloc(SwapSpace* 
swap_space) + : swap_space_(swap_space) { + } + + const LengthPrefixedArray* Copy(const ArrayRef& array) { + return CopyArray(swap_space_, array); + } + + void Destroy(const LengthPrefixedArray* array) { + ReleaseArray(swap_space_, array); + } + + private: + SwapSpace* const swap_space_; +}; + +class CompiledMethodStorage::ThunkMapKey { + public: + ThunkMapKey(linker::LinkerPatch::Type type, uint32_t custom_value1, uint32_t custom_value2) + : type_(type), custom_value1_(custom_value1), custom_value2_(custom_value2) {} + + bool operator<(const ThunkMapKey& other) const { + if (custom_value1_ != other.custom_value1_) { + return custom_value1_ < other.custom_value1_; + } + if (custom_value2_ != other.custom_value2_) { + return custom_value2_ < other.custom_value2_; + } + return type_ < other.type_; + } + + private: + linker::LinkerPatch::Type type_; + uint32_t custom_value1_; + uint32_t custom_value2_; +}; + +class CompiledMethodStorage::ThunkMapValue { + public: + ThunkMapValue(std::vector>&& code, + const std::string& debug_name) + : code_(std::move(code)), debug_name_(debug_name) {} + + ArrayRef GetCode() const { + return ArrayRef(code_); + } + + const std::string& GetDebugName() const { + return debug_name_; + } + + private: + std::vector> code_; + std::string debug_name_; +}; + +CompiledMethodStorage::CompiledMethodStorage(int swap_fd) + : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)), + dedupe_enabled_(true), + dedupe_code_("dedupe code", LengthPrefixedArrayAlloc(swap_space_.get())), + dedupe_vmap_table_("dedupe vmap table", + LengthPrefixedArrayAlloc(swap_space_.get())), + dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc(swap_space_.get())), + dedupe_linker_patches_("dedupe cfi info", + LengthPrefixedArrayAlloc(swap_space_.get())), + thunk_map_lock_("thunk_map_lock"), + thunk_map_(std::less(), SwapAllocator(swap_space_.get())) { +} + +CompiledMethodStorage::~CompiledMethodStorage() { + // All done by member destructors. 
+} + +void CompiledMethodStorage::DumpMemoryUsage(std::ostream& os, bool extended) const { + if (swap_space_.get() != nullptr) { + const size_t swap_size = swap_space_->GetSize(); + os << " swap=" << PrettySize(swap_size) << " (" << swap_size << "B)"; + } + if (extended) { + Thread* self = Thread::Current(); + os << "\nCode dedupe: " << dedupe_code_.DumpStats(self); + os << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats(self); + os << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats(self); + } +} + +const LengthPrefixedArray* CompiledMethodStorage::DeduplicateCode( + const ArrayRef& code) { + return AllocateOrDeduplicateArray(code, &dedupe_code_); +} + +void CompiledMethodStorage::ReleaseCode(const LengthPrefixedArray* code) { + ReleaseArrayIfNotDeduplicated(code); +} + +const LengthPrefixedArray* CompiledMethodStorage::DeduplicateVMapTable( + const ArrayRef& table) { + return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_); +} + +void CompiledMethodStorage::ReleaseVMapTable(const LengthPrefixedArray* table) { + ReleaseArrayIfNotDeduplicated(table); +} + +const LengthPrefixedArray* CompiledMethodStorage::DeduplicateCFIInfo( + const ArrayRef& cfi_info) { + return AllocateOrDeduplicateArray(cfi_info, &dedupe_cfi_info_); +} + +void CompiledMethodStorage::ReleaseCFIInfo(const LengthPrefixedArray* cfi_info) { + ReleaseArrayIfNotDeduplicated(cfi_info); +} + +const LengthPrefixedArray* CompiledMethodStorage::DeduplicateLinkerPatches( + const ArrayRef& linker_patches) { + return AllocateOrDeduplicateArray(linker_patches, &dedupe_linker_patches_); +} + +void CompiledMethodStorage::ReleaseLinkerPatches( + const LengthPrefixedArray* linker_patches) { + ReleaseArrayIfNotDeduplicated(linker_patches); +} + +CompiledMethodStorage::ThunkMapKey CompiledMethodStorage::GetThunkMapKey( + const linker::LinkerPatch& linker_patch) { + uint32_t custom_value1 = 0u; + uint32_t custom_value2 = 0u; + switch (linker_patch.GetType()) { + case 
linker::LinkerPatch::Type::kCallEntrypoint: + custom_value1 = linker_patch.EntrypointOffset(); + break; + case linker::LinkerPatch::Type::kBakerReadBarrierBranch: + custom_value1 = linker_patch.GetBakerCustomValue1(); + custom_value2 = linker_patch.GetBakerCustomValue2(); + break; + case linker::LinkerPatch::Type::kCallRelative: + // No custom values. + break; + default: + LOG(FATAL) << "Unexpected patch type: " << linker_patch.GetType(); + UNREACHABLE(); + } + return ThunkMapKey(linker_patch.GetType(), custom_value1, custom_value2); +} + +ArrayRef CompiledMethodStorage::GetThunkCode(const linker::LinkerPatch& linker_patch, + /*out*/ std::string* debug_name) { + ThunkMapKey key = GetThunkMapKey(linker_patch); + MutexLock lock(Thread::Current(), thunk_map_lock_); + auto it = thunk_map_.find(key); + if (it != thunk_map_.end()) { + const ThunkMapValue& value = it->second; + if (debug_name != nullptr) { + *debug_name = value.GetDebugName(); + } + return value.GetCode(); + } else { + if (debug_name != nullptr) { + *debug_name = std::string(); + } + return ArrayRef(); + } +} + +void CompiledMethodStorage::SetThunkCode(const linker::LinkerPatch& linker_patch, + ArrayRef code, + const std::string& debug_name) { + DCHECK(!code.empty()); + ThunkMapKey key = GetThunkMapKey(linker_patch); + std::vector> code_copy( + code.begin(), code.end(), SwapAllocator(swap_space_.get())); + ThunkMapValue value(std::move(code_copy), debug_name); + MutexLock lock(Thread::Current(), thunk_map_lock_); + // Note: Multiple threads can try and compile the same thunk, so this may not create a new entry. 
+ thunk_map_.emplace(key, std::move(value)); +} + +} // namespace art diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h new file mode 100644 index 0000000..a5a7691 --- /dev/null +++ b/compiler/driver/compiled_method_storage.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_ +#define ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_ + +#include +#include +#include + +#include "base/array_ref.h" +#include "base/length_prefixed_array.h" +#include "base/macros.h" +#include "utils/dedupe_set.h" +#include "utils/swap_space.h" + +namespace art { + +namespace linker { +class LinkerPatch; +} // namespace linker + +class CompiledMethodStorage { + public: + explicit CompiledMethodStorage(int swap_fd); + ~CompiledMethodStorage(); + + void DumpMemoryUsage(std::ostream& os, bool extended) const; + + void SetDedupeEnabled(bool dedupe_enabled) { + dedupe_enabled_ = dedupe_enabled; + } + bool DedupeEnabled() const { + return dedupe_enabled_; + } + + SwapAllocator GetSwapSpaceAllocator() { + return SwapAllocator(swap_space_.get()); + } + + const LengthPrefixedArray* DeduplicateCode(const ArrayRef& code); + void ReleaseCode(const LengthPrefixedArray* code); + + const LengthPrefixedArray* DeduplicateVMapTable(const ArrayRef& table); + void ReleaseVMapTable(const LengthPrefixedArray* table); + + 
const LengthPrefixedArray* DeduplicateCFIInfo(const ArrayRef& cfi_info); + void ReleaseCFIInfo(const LengthPrefixedArray* cfi_info); + + const LengthPrefixedArray* DeduplicateLinkerPatches( + const ArrayRef& linker_patches); + void ReleaseLinkerPatches(const LengthPrefixedArray* linker_patches); + + // Returns the code associated with the given patch. + // If the code has not been set, returns empty data. + // If `debug_name` is not null, stores the associated debug name in `*debug_name`. + ArrayRef GetThunkCode(const linker::LinkerPatch& linker_patch, + /*out*/ std::string* debug_name = nullptr); + + // Sets the code and debug name associated with the given patch. + void SetThunkCode(const linker::LinkerPatch& linker_patch, + ArrayRef code, + const std::string& debug_name); + + private: + class ThunkMapKey; + class ThunkMapValue; + using ThunkMapValueType = std::pair; + using ThunkMap = std::map, + SwapAllocator>; + static_assert(std::is_same::value, "Value type check."); + + static ThunkMapKey GetThunkMapKey(const linker::LinkerPatch& linker_patch); + + template + const LengthPrefixedArray* AllocateOrDeduplicateArray(const ArrayRef& data, + DedupeSetType* dedupe_set); + + template + void ReleaseArrayIfNotDeduplicated(const LengthPrefixedArray* array); + + // DeDuplication data structures. + template + class DedupeHashFunc; + + template + class LengthPrefixedArrayAlloc; + + template + using ArrayDedupeSet = DedupeSet, + LengthPrefixedArray, + LengthPrefixedArrayAlloc, + size_t, + DedupeHashFunc, + 4>; + + // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first + // as other fields rely on this. 
+ std::unique_ptr swap_space_; + + bool dedupe_enabled_; + + ArrayDedupeSet dedupe_code_; + ArrayDedupeSet dedupe_vmap_table_; + ArrayDedupeSet dedupe_cfi_info_; + ArrayDedupeSet dedupe_linker_patches_; + + Mutex thunk_map_lock_; + ThunkMap thunk_map_ GUARDED_BY(thunk_map_lock_); + + DISALLOW_COPY_AND_ASSIGN(CompiledMethodStorage); +}; + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_ diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc new file mode 100644 index 0000000..05eacd8 --- /dev/null +++ b/compiler/driver/compiled_method_storage_test.cc @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compiled_method_storage.h" + +#include + +#include "compiled_method-inl.h" + +namespace art { + +TEST(CompiledMethodStorage, Deduplicate) { + CompiledMethodStorage storage(/* swap_fd= */ -1); + + ASSERT_TRUE(storage.DedupeEnabled()); // The default. 
+ + const uint8_t raw_code1[] = { 1u, 2u, 3u }; + const uint8_t raw_code2[] = { 4u, 3u, 2u, 1u }; + ArrayRef code[] = { + ArrayRef(raw_code1), + ArrayRef(raw_code2), + }; + const uint8_t raw_vmap_table1[] = { 2, 4, 6 }; + const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 }; + ArrayRef vmap_table[] = { + ArrayRef(raw_vmap_table1), + ArrayRef(raw_vmap_table2), + }; + const uint8_t raw_cfi_info1[] = { 1, 3, 5 }; + const uint8_t raw_cfi_info2[] = { 8, 6, 4, 2 }; + ArrayRef cfi_info[] = { + ArrayRef(raw_cfi_info1), + ArrayRef(raw_cfi_info2), + }; + const linker::LinkerPatch raw_patches1[] = { + linker::LinkerPatch::IntrinsicReferencePatch(0u, 0u, 0u), + linker::LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 1u), + }; + const linker::LinkerPatch raw_patches2[] = { + linker::LinkerPatch::IntrinsicReferencePatch(0u, 0u, 0u), + linker::LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 2u), + }; + ArrayRef patches[] = { + ArrayRef(raw_patches1), + ArrayRef(raw_patches2), + }; + + std::vector compiled_methods; + compiled_methods.reserve(1u << 4); + for (auto&& c : code) { + for (auto&& v : vmap_table) { + for (auto&& f : cfi_info) { + for (auto&& p : patches) { + compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod( + &storage, InstructionSet::kNone, c, v, f, p)); + } + } + } + } + constexpr size_t code_bit = 1u << 3; + constexpr size_t vmap_table_bit = 1u << 2; + constexpr size_t cfi_info_bit = 1u << 1; + constexpr size_t patches_bit = 1u << 0; + CHECK_EQ(compiled_methods.size(), 1u << 4); + for (size_t i = 0; i != compiled_methods.size(); ++i) { + for (size_t j = 0; j != compiled_methods.size(); ++j) { + CompiledMethod* lhs = compiled_methods[i]; + CompiledMethod* rhs = compiled_methods[j]; + bool same_code = ((i ^ j) & code_bit) == 0u; + bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u; + bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u; + bool same_patches = ((i ^ j) & patches_bit) == 0u; + ASSERT_EQ(same_code, lhs->GetQuickCode().data() == 
rhs->GetQuickCode().data()) + << i << " " << j; + ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data()) + << i << " " << j; + ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data()) + << i << " " << j; + ASSERT_EQ(same_patches, lhs->GetPatches().data() == rhs->GetPatches().data()) + << i << " " << j; + } + } + for (CompiledMethod* method : compiled_methods) { + CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&storage, method); + } +} + +} // namespace art diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc new file mode 100644 index 0000000..cde6ae9 --- /dev/null +++ b/compiler/driver/compiler_options.cc @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "compiler_options.h" + +#include +#include + +#include "android-base/stringprintf.h" + +#include "arch/instruction_set.h" +#include "arch/instruction_set_features.h" +#include "base/runtime_debug.h" +#include "base/string_view_cpp20.h" +#include "base/variant_map.h" +#include "class_linker.h" +#include "cmdline_parser.h" +#include "compiler_options_map-inl.h" +#include "dex/dex_file-inl.h" +#include "dex/verification_results.h" +#include "dex/verified_method.h" +#include "runtime.h" +#include "scoped_thread_state_change-inl.h" +#include "simple_compiler_options_map.h" + +namespace art { + +CompilerOptions::CompilerOptions() + : compiler_filter_(CompilerFilter::kDefaultCompilerFilter), + huge_method_threshold_(kDefaultHugeMethodThreshold), + large_method_threshold_(kDefaultLargeMethodThreshold), + num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold), + inline_max_code_units_(kUnsetInlineMaxCodeUnits), + instruction_set_(kRuntimeISA == InstructionSet::kArm ? InstructionSet::kThumb2 : kRuntimeISA), + instruction_set_features_(nullptr), + no_inline_from_(), + dex_files_for_oat_file_(), + image_classes_(), + verification_results_(nullptr), + image_type_(ImageType::kNone), + compiling_with_core_image_(false), + baseline_(false), + debuggable_(false), + generate_debug_info_(kDefaultGenerateDebugInfo), + generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo), + generate_build_id_(false), + implicit_null_checks_(true), + implicit_so_checks_(true), + implicit_suspend_checks_(false), + compile_pic_(false), + dump_timings_(false), + dump_pass_timings_(false), + dump_stats_(false), + top_k_profile_threshold_(kDefaultTopKProfileThreshold), + profile_compilation_info_(nullptr), + verbose_methods_(), + abort_on_hard_verifier_failure_(false), + abort_on_soft_verifier_failure_(false), + init_failure_output_(nullptr), + dump_cfg_file_name_(""), + dump_cfg_append_(false), + force_determinism_(false), + deduplicate_code_(true), + 
count_hotness_in_compiled_code_(false), + resolve_startup_const_strings_(false), + initialize_app_image_classes_(false), + check_profiled_methods_(ProfileMethodsCheck::kNone), + max_image_block_size_(std::numeric_limits::max()), + register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault), + passes_to_run_(nullptr) { +} + +CompilerOptions::~CompilerOptions() { + // Everything done by member destructors. + // The definitions of classes forward-declared in the header have now been #included. +} + +namespace { + +bool kEmitRuntimeReadBarrierChecks = kIsDebugBuild && + RegisterRuntimeDebugFlag(&kEmitRuntimeReadBarrierChecks); + +} // namespace + +bool CompilerOptions::EmitRunTimeChecksInDebugMode() const { + // Run-time checks (e.g. Marking Register checks) are only emitted in slow-debug mode. + return kEmitRuntimeReadBarrierChecks; +} + +bool CompilerOptions::ParseDumpInitFailures(const std::string& option, std::string* error_msg) { + init_failure_output_.reset(new std::ofstream(option)); + if (init_failure_output_.get() == nullptr) { + *error_msg = "Failed to construct std::ofstream"; + return false; + } else if (init_failure_output_->fail()) { + *error_msg = android::base::StringPrintf( + "Failed to open %s for writing the initialization failures.", option.c_str()); + init_failure_output_.reset(); + return false; + } + return true; +} + +bool CompilerOptions::ParseRegisterAllocationStrategy(const std::string& option, + std::string* error_msg) { + if (option == "linear-scan") { + register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan; + } else if (option == "graph-color") { + register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor; + } else { + *error_msg = "Unrecognized register allocation strategy. 
Try linear-scan, or graph-color."; + return false; + } + return true; +} + +bool CompilerOptions::ParseCompilerOptions(const std::vector& options, + bool ignore_unrecognized, + std::string* error_msg) { + auto parser = CreateSimpleParser(ignore_unrecognized); + CmdlineResult parse_result = parser.Parse(options); + if (!parse_result.IsSuccess()) { + *error_msg = parse_result.GetMessage(); + return false; + } + + SimpleParseArgumentMap args = parser.ReleaseArgumentsMap(); + return ReadCompilerOptions(args, this, error_msg); +} + +bool CompilerOptions::IsImageClass(const char* descriptor) const { + // Historical note: We used to hold the set indirectly and there was a distinction between an + // empty set and a null, null meaning to include all classes. However, the distiction has been + // removed; if we don't have a profile, we treat it as an empty set of classes. b/77340429 + return image_classes_.find(std::string_view(descriptor)) != image_classes_.end(); +} + +const VerificationResults* CompilerOptions::GetVerificationResults() const { + DCHECK(Runtime::Current()->IsAotCompiler()); + return verification_results_; +} + +const VerifiedMethod* CompilerOptions::GetVerifiedMethod(const DexFile* dex_file, + uint32_t method_idx) const { + MethodReference ref(dex_file, method_idx); + return verification_results_->GetVerifiedMethod(ref); +} + +bool CompilerOptions::IsMethodVerifiedWithoutFailures(uint32_t method_idx, + uint16_t class_def_idx, + const DexFile& dex_file) const { + const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx); + if (verified_method != nullptr) { + return !verified_method->HasVerificationFailures(); + } + + // If we can't find verification metadata, check if this is a system class (we trust that system + // classes have their methods verified). If it's not, be conservative and assume the method + // has not been verified successfully. 
+ + // TODO: When compiling the boot image it should be safe to assume that everything is verified, + // even if methods are not found in the verification cache. + const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx)); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr; + if (!is_system_class) { + self->ClearException(); + } + return is_system_class; +} + +bool CompilerOptions::IsCoreImageFilename(const std::string& boot_image_filename) { + std::string_view filename(boot_image_filename); + size_t colon_pos = filename.find(':'); + if (colon_pos != std::string_view::npos) { + filename = filename.substr(0u, colon_pos); + } + // Look for "core.art" or "core-*.art". + if (EndsWith(filename, "core.art")) { + return true; + } + if (!EndsWith(filename, ".art")) { + return false; + } + size_t slash_pos = filename.rfind('/'); + if (slash_pos == std::string::npos) { + return StartsWith(filename, "core-"); + } + return filename.compare(slash_pos + 1, 5u, "core-") == 0; +} + +} // namespace art diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h new file mode 100644 index 0000000..79ba1c2 --- /dev/null +++ b/compiler/driver/compiler_options.h @@ -0,0 +1,481 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ + +#include +#include +#include +#include + +#include "base/globals.h" +#include "base/hash_set.h" +#include "base/macros.h" +#include "base/utils.h" +#include "compiler_filter.h" +#include "optimizing/register_allocator.h" + +namespace art { + +namespace jit { +class JitCompiler; +} // namespace jit + +namespace verifier { +class VerifierDepsTest; +} // namespace verifier + +namespace linker { +class Arm64RelativePatcherTest; +} // namespace linker + +class DexFile; +enum class InstructionSet; +class InstructionSetFeatures; +class ProfileCompilationInfo; +class VerificationResults; +class VerifiedMethod; + +// Enum for CheckProfileMethodsCompiled. Outside CompilerOptions so it can be forward-declared. +enum class ProfileMethodsCheck : uint8_t { + kNone, + kLog, + kAbort, +}; + +class CompilerOptions final { + public: + // Guide heuristics to determine whether to compile method if profile data not available. + static const size_t kDefaultHugeMethodThreshold = 10000; + static const size_t kDefaultLargeMethodThreshold = 600; + static const size_t kDefaultNumDexMethodsThreshold = 900; + static constexpr double kDefaultTopKProfileThreshold = 90.0; + static const bool kDefaultGenerateDebugInfo = false; + static const bool kDefaultGenerateMiniDebugInfo = false; + static const size_t kDefaultInlineMaxCodeUnits = 32; + static constexpr size_t kUnsetInlineMaxCodeUnits = -1; + + enum class ImageType : uint8_t { + kNone, // JIT or AOT app compilation producing only an oat file but no image. + kBootImage, // Creating boot image. + kBootImageExtension, // Creating boot image extension. + kAppImage, // Creating app image. 
+ }; + + CompilerOptions(); + ~CompilerOptions(); + + CompilerFilter::Filter GetCompilerFilter() const { + return compiler_filter_; + } + + void SetCompilerFilter(CompilerFilter::Filter compiler_filter) { + compiler_filter_ = compiler_filter; + } + + bool IsAotCompilationEnabled() const { + return CompilerFilter::IsAotCompilationEnabled(compiler_filter_); + } + + bool IsJniCompilationEnabled() const { + return CompilerFilter::IsJniCompilationEnabled(compiler_filter_); + } + + bool IsQuickeningCompilationEnabled() const { + return CompilerFilter::IsQuickeningCompilationEnabled(compiler_filter_); + } + + bool IsVerificationEnabled() const { + return CompilerFilter::IsVerificationEnabled(compiler_filter_); + } + + bool AssumeDexFilesAreVerified() const { + return compiler_filter_ == CompilerFilter::kAssumeVerified; + } + + bool AssumeClassesAreVerified() const { + return compiler_filter_ == CompilerFilter::kAssumeVerified; + } + + bool VerifyAtRuntime() const { + return compiler_filter_ == CompilerFilter::kExtract; + } + + bool IsAnyCompilationEnabled() const { + return CompilerFilter::IsAnyCompilationEnabled(compiler_filter_); + } + + size_t GetHugeMethodThreshold() const { + return huge_method_threshold_; + } + + size_t GetLargeMethodThreshold() const { + return large_method_threshold_; + } + + bool IsHugeMethod(size_t num_dalvik_instructions) const { + return num_dalvik_instructions > huge_method_threshold_; + } + + bool IsLargeMethod(size_t num_dalvik_instructions) const { + return num_dalvik_instructions > large_method_threshold_; + } + + size_t GetNumDexMethodsThreshold() const { + return num_dex_methods_threshold_; + } + + size_t GetInlineMaxCodeUnits() const { + return inline_max_code_units_; + } + void SetInlineMaxCodeUnits(size_t units) { + inline_max_code_units_ = units; + } + + double GetTopKProfileThreshold() const { + return top_k_profile_threshold_; + } + + bool GetDebuggable() const { + return debuggable_; + } + + void SetDebuggable(bool value) { + 
debuggable_ = value; + } + + bool GetNativeDebuggable() const { + return GetDebuggable() && GetGenerateDebugInfo(); + } + + // This flag controls whether the compiler collects debugging information. + // The other flags control how the information is written to disk. + bool GenerateAnyDebugInfo() const { + return GetGenerateDebugInfo() || GetGenerateMiniDebugInfo(); + } + + bool GetGenerateDebugInfo() const { + return generate_debug_info_; + } + + bool GetGenerateMiniDebugInfo() const { + return generate_mini_debug_info_; + } + + // Should run-time checks be emitted in debug mode? + bool EmitRunTimeChecksInDebugMode() const; + + bool GetGenerateBuildId() const { + return generate_build_id_; + } + + bool GetImplicitNullChecks() const { + return implicit_null_checks_; + } + + bool GetImplicitStackOverflowChecks() const { + return implicit_so_checks_; + } + + bool GetImplicitSuspendChecks() const { + return implicit_suspend_checks_; + } + + bool IsGeneratingImage() const { + return IsBootImage() || IsBootImageExtension() || IsAppImage(); + } + + // Are we compiling a boot image? + bool IsBootImage() const { + return image_type_ == ImageType::kBootImage; + } + + // Are we compiling a boot image extension? + bool IsBootImageExtension() const { + return image_type_ == ImageType::kBootImageExtension; + } + + bool IsBaseline() const { + return baseline_; + } + + // Are we compiling an app image? + bool IsAppImage() const { + return image_type_ == ImageType::kAppImage; + } + + // Returns whether we are compiling against a "core" image, which + // is an indicative we are running tests. The compiler will use that + // information for checking invariants. + bool CompilingWithCoreImage() const { + return compiling_with_core_image_; + } + + // Should the code be compiled as position independent? 
+ bool GetCompilePic() const { + return compile_pic_; + } + + const ProfileCompilationInfo* GetProfileCompilationInfo() const { + return profile_compilation_info_; + } + + bool HasVerboseMethods() const { + return !verbose_methods_.empty(); + } + + bool IsVerboseMethod(const std::string& pretty_method) const { + for (const std::string& cur_method : verbose_methods_) { + if (pretty_method.find(cur_method) != std::string::npos) { + return true; + } + } + return false; + } + + std::ostream* GetInitFailureOutput() const { + return init_failure_output_.get(); + } + + bool AbortOnHardVerifierFailure() const { + return abort_on_hard_verifier_failure_; + } + bool AbortOnSoftVerifierFailure() const { + return abort_on_soft_verifier_failure_; + } + + InstructionSet GetInstructionSet() const { + return instruction_set_; + } + + const InstructionSetFeatures* GetInstructionSetFeatures() const { + return instruction_set_features_.get(); + } + + + const std::vector& GetNoInlineFromDexFile() const { + return no_inline_from_; + } + + const std::vector& GetDexFilesForOatFile() const { + return dex_files_for_oat_file_; + } + + const HashSet& GetImageClasses() const { + return image_classes_; + } + + bool IsImageClass(const char* descriptor) const; + + const VerificationResults* GetVerificationResults() const; + + const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const; + + // Checks if the specified method has been verified without failures. Returns + // false if the method is not in the verification results (GetVerificationResults). 
+ bool IsMethodVerifiedWithoutFailures(uint32_t method_idx, + uint16_t class_def_idx, + const DexFile& dex_file) const; + + bool ParseCompilerOptions(const std::vector& options, + bool ignore_unrecognized, + std::string* error_msg); + + void SetNonPic() { + compile_pic_ = false; + } + + const std::string& GetDumpCfgFileName() const { + return dump_cfg_file_name_; + } + + bool GetDumpCfgAppend() const { + return dump_cfg_append_; + } + + bool IsForceDeterminism() const { + return force_determinism_; + } + + bool DeduplicateCode() const { + return deduplicate_code_; + } + + RegisterAllocator::Strategy GetRegisterAllocationStrategy() const { + return register_allocation_strategy_; + } + + const std::vector* GetPassesToRun() const { + return passes_to_run_; + } + + bool GetDumpTimings() const { + return dump_timings_; + } + + bool GetDumpPassTimings() const { + return dump_pass_timings_; + } + + bool GetDumpStats() const { + return dump_stats_; + } + + bool CountHotnessInCompiledCode() const { + return count_hotness_in_compiled_code_; + } + + bool ResolveStartupConstStrings() const { + return resolve_startup_const_strings_; + } + + ProfileMethodsCheck CheckProfiledMethodsCompiled() const { + return check_profiled_methods_; + } + + uint32_t MaxImageBlockSize() const { + return max_image_block_size_; + } + + void SetMaxImageBlockSize(uint32_t size) { + max_image_block_size_ = size; + } + + bool InitializeAppImageClasses() const { + return initialize_app_image_classes_; + } + + // Is `boot_image_filename` the name of a core image (small boot + // image used for ART testing only)? 
+ static bool IsCoreImageFilename(const std::string& boot_image_filename); + + private: + bool ParseDumpInitFailures(const std::string& option, std::string* error_msg); + bool ParseRegisterAllocationStrategy(const std::string& option, std::string* error_msg); + + CompilerFilter::Filter compiler_filter_; + size_t huge_method_threshold_; + size_t large_method_threshold_; + size_t num_dex_methods_threshold_; + size_t inline_max_code_units_; + + InstructionSet instruction_set_; + std::unique_ptr instruction_set_features_; + + // Dex files from which we should not inline code. Does not own the dex files. + // This is usually a very short list (i.e. a single dex file), so we + // prefer vector<> over a lookup-oriented container, such as set<>. + std::vector no_inline_from_; + + // List of dex files associated with the oat file, empty for JIT. + std::vector dex_files_for_oat_file_; + + // Image classes, specifies the classes that will be included in the image if creating an image. + // Must not be empty for real boot image, only for tests pretending to compile boot image. + HashSet image_classes_; + + // Results of AOT verification. + const VerificationResults* verification_results_; + + ImageType image_type_; + bool compiling_with_core_image_; + bool baseline_; + bool debuggable_; + bool generate_debug_info_; + bool generate_mini_debug_info_; + bool generate_build_id_; + bool implicit_null_checks_; + bool implicit_so_checks_; + bool implicit_suspend_checks_; + bool compile_pic_; + bool dump_timings_; + bool dump_pass_timings_; + bool dump_stats_; + + // When using a profile file only the top K% of the profiled samples will be compiled. + double top_k_profile_threshold_; + + // Info for profile guided compilation. + const ProfileCompilationInfo* profile_compilation_info_; + + // Vector of methods to have verbose output enabled for. + std::vector verbose_methods_; + + // Abort compilation with an error if we find a class that fails verification with a hard + // failure. 
+ bool abort_on_hard_verifier_failure_; + // Same for soft failures. + bool abort_on_soft_verifier_failure_; + + // Log initialization of initialization failures to this stream if not null. + std::unique_ptr init_failure_output_; + + std::string dump_cfg_file_name_; + bool dump_cfg_append_; + + // Whether the compiler should trade performance for determinism to guarantee exactly reproducible + // outcomes. + bool force_determinism_; + + // Whether code should be deduplicated. + bool deduplicate_code_; + + // Whether compiled code should increment the hotness count of ArtMethod. Note that the increments + // won't be atomic for performance reasons, so we accept races, just like in interpreter. + bool count_hotness_in_compiled_code_; + + // Whether we eagerly resolve all of the const strings that are loaded from startup methods in the + // profile. + bool resolve_startup_const_strings_; + + // Whether we attempt to run class initializers for app image classes. + bool initialize_app_image_classes_; + + // When running profile-guided compilation, check that methods intended to be compiled end + // up compiled and are not punted. + ProfileMethodsCheck check_profiled_methods_; + + // Maximum solid block size in the generated image. + uint32_t max_image_block_size_; + + RegisterAllocator::Strategy register_allocation_strategy_; + + // If not null, specifies optimization passes which will be run instead of defaults. + // Note that passes_to_run_ is not checked for correctness and providing an incorrect + // list of passes can lead to unexpected compiler behaviour. This is caused by dependencies + // between passes. Failing to satisfy them can for example lead to compiler crashes. + // Passing pass names which are not recognized by the compiler will result in + // compiler-dependant behavior. 
+ const std::vector* passes_to_run_; + + friend class Dex2Oat; + friend class DexToDexDecompilerTest; + friend class CommonCompilerDriverTest; + friend class CommonCompilerTest; + friend class jit::JitCompiler; + friend class verifier::VerifierDepsTest; + friend class linker::Arm64RelativePatcherTest; + + template + friend bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg); + + DISALLOW_COPY_AND_ASSIGN(CompilerOptions); +}; + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h new file mode 100644 index 0000000..e8a425d --- /dev/null +++ b/compiler/driver/compiler_options_map-inl.h @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_ + +#include "compiler_options_map.h" + +#include + +#include "android-base/logging.h" +#include "android-base/macros.h" +#include "android-base/stringprintf.h" + +#include "base/macros.h" +#include "cmdline_parser.h" +#include "compiler_options.h" + +namespace art { + +template +inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg) { + if (map.Exists(Base::CompilerFilter)) { + CompilerFilter::Filter compiler_filter; + if (!CompilerFilter::ParseCompilerFilter(map.Get(Base::CompilerFilter)->c_str(), + &compiler_filter)) { + *error_msg = android::base::StringPrintf("Unknown --compiler-filter value %s", + map.Get(Base::CompilerFilter)->c_str()); + return false; + } + options->SetCompilerFilter(compiler_filter); + } + map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_); + map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_); + map.AssignIfExists(Base::NumDexMethodsThreshold, &options->num_dex_methods_threshold_); + map.AssignIfExists(Base::InlineMaxCodeUnitsThreshold, &options->inline_max_code_units_); + map.AssignIfExists(Base::GenerateDebugInfo, &options->generate_debug_info_); + map.AssignIfExists(Base::GenerateMiniDebugInfo, &options->generate_mini_debug_info_); + map.AssignIfExists(Base::GenerateBuildID, &options->generate_build_id_); + if (map.Exists(Base::Debuggable)) { + options->debuggable_ = true; + } + if (map.Exists(Base::Baseline)) { + options->baseline_ = true; + } + map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_); + map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_); + map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_); + if (map.Exists(Base::DumpInitFailures)) { + if 
(!options->ParseDumpInitFailures(*map.Get(Base::DumpInitFailures), error_msg)) { + return false; + } + } + map.AssignIfExists(Base::DumpCFG, &options->dump_cfg_file_name_); + if (map.Exists(Base::DumpCFGAppend)) { + options->dump_cfg_append_ = true; + } + if (map.Exists(Base::RegisterAllocationStrategy)) { + if (!options->ParseRegisterAllocationStrategy(*map.Get(Base::DumpInitFailures), error_msg)) { + return false; + } + } + map.AssignIfExists(Base::VerboseMethods, &options->verbose_methods_); + options->deduplicate_code_ = map.GetOrDefault(Base::DeduplicateCode); + if (map.Exists(Base::CountHotnessInCompiledCode)) { + options->count_hotness_in_compiled_code_ = true; + } + map.AssignIfExists(Base::ResolveStartupConstStrings, &options->resolve_startup_const_strings_); + map.AssignIfExists(Base::InitializeAppImageClasses, &options->initialize_app_image_classes_); + if (map.Exists(Base::CheckProfiledMethods)) { + options->check_profiled_methods_ = *map.Get(Base::CheckProfiledMethods); + } + map.AssignIfExists(Base::MaxImageBlockSize, &options->max_image_block_size_); + + if (map.Exists(Base::DumpTimings)) { + options->dump_timings_ = true; + } + + if (map.Exists(Base::DumpPassTimings)) { + options->dump_pass_timings_ = true; + } + + if (map.Exists(Base::DumpStats)) { + options->dump_stats_ = true; + } + + return true; +} + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wframe-larger-than=" + +template +inline void AddCompilerOptionsArgumentParserOptions(Builder& b) { + b. 
+ Define("--compiler-filter=_") + .template WithType() + .IntoKey(Map::CompilerFilter) + + .Define("--huge-method-max=_") + .template WithType() + .IntoKey(Map::HugeMethodMaxThreshold) + .Define("--large-method-max=_") + .template WithType() + .IntoKey(Map::LargeMethodMaxThreshold) + .Define("--num-dex-methods=_") + .template WithType() + .IntoKey(Map::NumDexMethodsThreshold) + .Define("--inline-max-code-units=_") + .template WithType() + .IntoKey(Map::InlineMaxCodeUnitsThreshold) + + .Define({"--generate-debug-info", "-g", "--no-generate-debug-info"}) + .WithValues({true, true, false}) + .IntoKey(Map::GenerateDebugInfo) + .Define({"--generate-mini-debug-info", "--no-generate-mini-debug-info"}) + .WithValues({true, false}) + .IntoKey(Map::GenerateMiniDebugInfo) + + .Define({"--generate-build-id", "--no-generate-build-id"}) + .WithValues({true, false}) + .IntoKey(Map::GenerateBuildID) + + .Define({"--deduplicate-code=_"}) + .template WithType() + .WithValueMap({{"false", false}, {"true", true}}) + .IntoKey(Map::DeduplicateCode) + + .Define({"--count-hotness-in-compiled-code"}) + .IntoKey(Map::CountHotnessInCompiledCode) + + .Define({"--check-profiled-methods=_"}) + .template WithType() + .WithValueMap({{"log", ProfileMethodsCheck::kLog}, + {"abort", ProfileMethodsCheck::kAbort}}) + .IntoKey(Map::CheckProfiledMethods) + + .Define({"--dump-timings"}) + .IntoKey(Map::DumpTimings) + + .Define({"--dump-pass-timings"}) + .IntoKey(Map::DumpPassTimings) + + .Define({"--dump-stats"}) + .IntoKey(Map::DumpStats) + + .Define("--debuggable") + .IntoKey(Map::Debuggable) + + .Define("--baseline") + .IntoKey(Map::Baseline) + + .Define("--top-k-profile-threshold=_") + .template WithType().WithRange(0.0, 100.0) + .IntoKey(Map::TopKProfileThreshold) + + .Define({"--abort-on-hard-verifier-error", "--no-abort-on-hard-verifier-error"}) + .WithValues({true, false}) + .IntoKey(Map::AbortOnHardVerifierFailure) + .Define({"--abort-on-soft-verifier-error", 
"--no-abort-on-soft-verifier-error"}) + .WithValues({true, false}) + .IntoKey(Map::AbortOnSoftVerifierFailure) + + .Define("--dump-init-failures=_") + .template WithType() + .IntoKey(Map::DumpInitFailures) + + .Define("--dump-cfg=_") + .template WithType() + .IntoKey(Map::DumpCFG) + .Define("--dump-cfg-append") + .IntoKey(Map::DumpCFGAppend) + + .Define("--register-allocation-strategy=_") + .template WithType() + .IntoKey(Map::RegisterAllocationStrategy) + + .Define("--resolve-startup-const-strings=_") + .template WithType() + .WithValueMap({{"false", false}, {"true", true}}) + .IntoKey(Map::ResolveStartupConstStrings) + + .Define("--initialize-app-image-classes=_") + .template WithType() + .WithValueMap({{"false", false}, {"true", true}}) + .IntoKey(Map::InitializeAppImageClasses) + + .Define("--verbose-methods=_") + .template WithType>() + .IntoKey(Map::VerboseMethods) + + .Define("--max-image-block-size=_") + .template WithType() + .IntoKey(Map::MaxImageBlockSize); +} + +#pragma GCC diagnostic pop + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_ diff --git a/compiler/driver/compiler_options_map-storage.h b/compiler/driver/compiler_options_map-storage.h new file mode 100644 index 0000000..01f32e0 --- /dev/null +++ b/compiler/driver/compiler_options_map-storage.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ + +// Assumes: +// * #include "compiler_options_map.h" +// * namespace art +// +// Usage: +// #define COMPILER_OPTIONS_MAP_TYPE TheTypeOfTheMap +// #define COMPILER_OPTIONS_MAP_KEY_TYPE TheTypeOfTheMapsKey +// #include "driver/compiler_options_map-storage.h + +#ifndef COMPILER_OPTIONS_MAP_TYPE +#error "Expected COMPILER_OPTIONS_MAP_TYPE" +#endif + +#ifndef COMPILER_OPTIONS_MAP_KEY_TYPE +#error "Expected COMPILER_OPTIONS_MAP_KEY_TYPE" +#endif + +#define COMPILER_OPTIONS_KEY(Type, Name, ...) \ + template class KeyType> \ + const KeyType CompilerOptionsMap::Name {__VA_ARGS__}; +#include + +template struct CompilerOptionsMap; + +#undef COMPILER_OPTIONS_MAP_TYPE +#undef COMPILER_OPTIONS_MAP_KEY_TYPE + +#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ +#undef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ // Guard is only for cpplint diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def new file mode 100644 index 0000000..df06bd8 --- /dev/null +++ b/compiler/driver/compiler_options_map.def @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef COMPILER_OPTIONS_KEY +#error "Please #define COMPILER_OPTIONS_KEY before #including this file" +#define COMPILER_OPTIONS_KEY(...) 
// Don't display errors in this file in IDEs. +#endif + +// This file defines the list of keys for CompilerOptionsMap. +// These can be used with CompilerOptionsMap.Get/Set/etc, once that template class has been +// instantiated. +// +// Column Descriptions: +// <> <> (<>) +// +// Default values are only used by Map::GetOrDefault(K). +// If a default value is omitted here, T{} is used as the default value, which is +// almost-always the value of the type as if it was memset to all 0. +// +// Please keep the columns aligned if possible when adding new rows. +// + +// Parse-able keys from the command line. + +// TODO: Add type parser. +COMPILER_OPTIONS_KEY (std::string, CompilerFilter) +COMPILER_OPTIONS_KEY (Unit, PIC) +COMPILER_OPTIONS_KEY (unsigned int, HugeMethodMaxThreshold) +COMPILER_OPTIONS_KEY (unsigned int, LargeMethodMaxThreshold) +COMPILER_OPTIONS_KEY (unsigned int, NumDexMethodsThreshold) +COMPILER_OPTIONS_KEY (unsigned int, InlineMaxCodeUnitsThreshold) +COMPILER_OPTIONS_KEY (bool, GenerateDebugInfo) +COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo) +COMPILER_OPTIONS_KEY (bool, GenerateBuildID) +COMPILER_OPTIONS_KEY (Unit, Debuggable) +COMPILER_OPTIONS_KEY (Unit, Baseline) +COMPILER_OPTIONS_KEY (double, TopKProfileThreshold) +COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure) +COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure) +COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, false) +COMPILER_OPTIONS_KEY (bool, InitializeAppImageClasses, false) +COMPILER_OPTIONS_KEY (std::string, DumpInitFailures) +COMPILER_OPTIONS_KEY (std::string, DumpCFG) +COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend) +// TODO: Add type parser. 
+COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy) +COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods) +COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true) +COMPILER_OPTIONS_KEY (Unit, CountHotnessInCompiledCode) +COMPILER_OPTIONS_KEY (ProfileMethodsCheck, CheckProfiledMethods) +COMPILER_OPTIONS_KEY (Unit, DumpTimings) +COMPILER_OPTIONS_KEY (Unit, DumpPassTimings) +COMPILER_OPTIONS_KEY (Unit, DumpStats) +COMPILER_OPTIONS_KEY (unsigned int, MaxImageBlockSize) + +#undef COMPILER_OPTIONS_KEY diff --git a/compiler/driver/compiler_options_map.h b/compiler/driver/compiler_options_map.h new file mode 100644 index 0000000..af212d6 --- /dev/null +++ b/compiler/driver/compiler_options_map.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_ + +#include +#include + +#include "base/variant_map.h" +#include "cmdline_types.h" + +namespace art { + +enum class ProfileMethodsCheck : uint8_t; + +// Defines a type-safe heterogeneous key->value map. This is to be used as the base for +// an extended map. +template class KeyType> +struct CompilerOptionsMap : VariantMap { + // Make the next many usages of Key slightly shorter to type. 
+ template + using Key = KeyType; + + // List of key declarations, shorthand for 'static const Key Name' +#define COMPILER_OPTIONS_KEY(Type, Name, ...) static const Key (Name); +#include "compiler_options_map.def" +}; + +#undef DECLARE_KEY + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_ diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc new file mode 100644 index 0000000..0d0f074 --- /dev/null +++ b/compiler/driver/dex_compilation_unit.cc @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "dex_compilation_unit.h" + +#include "art_field.h" +#include "base/utils.h" +#include "dex/class_accessor-inl.h" +#include "dex/code_item_accessors-inl.h" +#include "dex/descriptors_names.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "scoped_thread_state_change-inl.h" + +namespace art { + +DexCompilationUnit::DexCompilationUnit(Handle class_loader, + ClassLinker* class_linker, + const DexFile& dex_file, + const dex::CodeItem* code_item, + uint16_t class_def_idx, + uint32_t method_idx, + uint32_t access_flags, + const VerifiedMethod* verified_method, + Handle dex_cache, + Handle compiling_class) + : class_loader_(class_loader), + class_linker_(class_linker), + dex_file_(&dex_file), + code_item_(code_item), + class_def_idx_(class_def_idx), + dex_method_idx_(method_idx), + access_flags_(access_flags), + verified_method_(verified_method), + dex_cache_(dex_cache), + code_item_accessor_(dex_file, code_item), + compiling_class_(compiling_class) {} + +const std::string& DexCompilationUnit::GetSymbol() { + if (symbol_.empty()) { + symbol_ = "dex_"; + symbol_ += MangleForJni(dex_file_->PrettyMethod(dex_method_idx_)); + } + return symbol_; +} + +bool DexCompilationUnit::RequiresConstructorBarrier() const { + // Constructor barriers are applicable only for methods. + DCHECK(!IsStatic()); + DCHECK(IsConstructor()); + + // We require a constructor barrier if there are final instance fields. + if (GetCompilingClass().GetReference() != nullptr && !GetCompilingClass().IsNull()) { + // Decoding class data can be slow, so iterate over fields of the compiling class if resolved. + ScopedObjectAccess soa(Thread::Current()); + ObjPtr compiling_class = GetCompilingClass().Get(); + for (size_t i = 0, size = compiling_class->NumInstanceFields(); i != size; ++i) { + ArtField* field = compiling_class->GetInstanceField(i); + if (field->IsFinal()) { + return true; + } + } + } else { + // Iterate over field definitions in the class data. 
+ ClassAccessor accessor(*GetDexFile(), GetClassDefIndex()); + for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) { + if (field.IsFinal()) { + return true; + } + } + } + return false; +} + +} // namespace art diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h new file mode 100644 index 0000000..def90fa --- /dev/null +++ b/compiler/driver/dex_compilation_unit.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_ +#define ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_ + +#include + +#include "base/arena_object.h" +#include "dex/code_item_accessors.h" +#include "dex/dex_file.h" +#include "handle.h" + +namespace art { +namespace mirror { +class Class; +class ClassLoader; +class DexCache; +} // namespace mirror +class ClassLinker; +class VerifiedMethod; + +class DexCompilationUnit : public DeletableArenaObject { + public: + DexCompilationUnit(Handle class_loader, + ClassLinker* class_linker, + const DexFile& dex_file, + const dex::CodeItem* code_item, + uint16_t class_def_idx, + uint32_t method_idx, + uint32_t access_flags, + const VerifiedMethod* verified_method, + Handle dex_cache, + Handle compiling_class = Handle()); + + Handle GetClassLoader() const { + return class_loader_; + } + + ClassLinker* GetClassLinker() const { + return class_linker_; + } + + const DexFile* GetDexFile() const { + return dex_file_; + } + + uint16_t GetClassDefIndex() const { + return class_def_idx_; + } + + uint32_t GetDexMethodIndex() const { + return dex_method_idx_; + } + + const dex::CodeItem* GetCodeItem() const { + return code_item_; + } + + const char* GetShorty() const { + const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_); + return dex_file_->GetMethodShorty(method_id); + } + + const char* GetShorty(uint32_t* shorty_len) const { + const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_); + return dex_file_->GetMethodShorty(method_id, shorty_len); + } + + uint32_t GetAccessFlags() const { + return access_flags_; + } + + bool IsConstructor() const { + return ((access_flags_ & kAccConstructor) != 0); + } + + bool IsNative() const { + return ((access_flags_ & kAccNative) != 0); + } + + bool IsStatic() const { + return ((access_flags_ & kAccStatic) != 0); + } + + bool IsSynchronized() const { + return ((access_flags_ & kAccSynchronized) != 0); + } + + const VerifiedMethod* GetVerifiedMethod() const { 
+ return verified_method_; + } + + void ClearVerifiedMethod() { + verified_method_ = nullptr; + } + + const std::string& GetSymbol(); + + Handle GetDexCache() const { + return dex_cache_; + } + + const CodeItemDataAccessor& GetCodeItemAccessor() const { + return code_item_accessor_; + } + + Handle GetCompilingClass() const { + return compiling_class_; + } + + // Does this method require a constructor barrier (prior to the return)? + // The answer is "yes", if and only if the class has any instance final fields. + // (This must not be called for any non- methods; the answer would be "no"). + // + // --- + // + // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end + // of the invoked constructor. The constructor barrier is a conservative implementation means of + // enforcing the freezes happen-before the object being constructed is observable by another + // thread. + // + // Note: This question only makes sense for instance constructors; + // static constructors (despite possibly having finals) never need + // a barrier. + // + // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes + // class initialization as: + // + // lock(class.lock) + // class.state = initializing + // unlock(class.lock) + // + // invoke + // + // lock(class.lock) + // class.state = initialized + // unlock(class.lock) <-- acts as a release + // + // The last operation in the above example acts as an atomic release + // for any stores in , which ends up being stricter + // than what a constructor barrier needs. + // + // See also QuasiAtomic::ThreadFenceForConstructor(). 
+ bool RequiresConstructorBarrier() const; + + private: + const Handle class_loader_; + + ClassLinker* const class_linker_; + + const DexFile* const dex_file_; + + const dex::CodeItem* const code_item_; + const uint16_t class_def_idx_; + const uint32_t dex_method_idx_; + const uint32_t access_flags_; + const VerifiedMethod* verified_method_; + + const Handle dex_cache_; + + const CodeItemDataAccessor code_item_accessor_; + + Handle compiling_class_; + + std::string symbol_; +}; + +} // namespace art + +#endif // ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_ diff --git a/compiler/driver/simple_compiler_options_map.h b/compiler/driver/simple_compiler_options_map.h new file mode 100644 index 0000000..e7a51a4 --- /dev/null +++ b/compiler/driver/simple_compiler_options_map.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file declares a completion of the CompilerOptionsMap and should be included into a +// .cc file, only. 
+ +#ifndef ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_ +#define ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_ + +#include + +#include "compiler_options_map-inl.h" +#include "base/variant_map.h" + +namespace art { + +template +struct SimpleParseArgumentMapKey : VariantMapKey { + SimpleParseArgumentMapKey() {} + explicit SimpleParseArgumentMapKey(TValue default_value) + : VariantMapKey(std::move(default_value)) {} + // Don't ODR-use constexpr default values, which means that Struct::Fields + // that are declared 'static constexpr T Name = Value' don't need to have a matching definition. +}; + +struct SimpleParseArgumentMap : CompilerOptionsMap { + // This 'using' line is necessary to inherit the variadic constructor. + using CompilerOptionsMap::CompilerOptionsMap; +}; + +#define COMPILER_OPTIONS_MAP_TYPE SimpleParseArgumentMap +#define COMPILER_OPTIONS_MAP_KEY_TYPE SimpleParseArgumentMapKey +#include "compiler_options_map-storage.h" + +using Parser = CmdlineParser; + +static inline Parser CreateSimpleParser(bool ignore_unrecognized) { + std::unique_ptr parser_builder = + std::make_unique(); + + AddCompilerOptionsArgumentParserOptions(*parser_builder); + + parser_builder->IgnoreUnrecognized(ignore_unrecognized); + + return parser_builder->Build(); +} + +} // namespace art + +#endif // ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_ diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc new file mode 100644 index 0000000..633e124 --- /dev/null +++ b/compiler/exception_test.cc @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "base/arena_allocator.h" +#include "base/callee_save_type.h" +#include "base/enums.h" +#include "base/leb128.h" +#include "base/malloc_arena_pool.h" +#include "class_linker.h" +#include "common_runtime_test.h" +#include "dex/code_item_accessors-inl.h" +#include "dex/dex_file-inl.h" +#include "dex/dex_file.h" +#include "dex/dex_file_exception_helpers.h" +#include "gtest/gtest.h" +#include "handle_scope-inl.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/stack_trace_element-inl.h" +#include "oat_quick_method_header.h" +#include "obj_ptr-inl.h" +#include "optimizing/stack_map_stream.h" +#include "runtime-inl.h" +#include "scoped_thread_state_change-inl.h" +#include "thread.h" + +namespace art { + +class ExceptionTest : public CommonRuntimeTest { + protected: + // Since various dexers may differ in bytecode layout, we play + // it safe and simply set the dex pc to the start of the method, + // which always points to the first source statement. 
+ static constexpr const uint32_t kDexPc = 0; + + void SetUp() override { + CommonRuntimeTest::SetUp(); + + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<2> hs(soa.Self()); + Handle class_loader( + hs.NewHandle(soa.Decode(LoadDex("ExceptionHandle")))); + my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader); + ASSERT_TRUE(my_klass_ != nullptr); + Handle klass(hs.NewHandle(my_klass_)); + class_linker_->EnsureInitialized(soa.Self(), klass, true, true); + my_klass_ = klass.Get(); + + dex_ = my_klass_->GetDexCache()->GetDexFile(); + + uint32_t code_size = 12; + for (size_t i = 0 ; i < code_size; i++) { + fake_code_.push_back(0x70 | i); + } + + const uint32_t native_pc_offset = 4u; + CHECK_ALIGNED_PARAM(native_pc_offset, GetInstructionSetInstructionAlignment(kRuntimeISA)); + + MallocArenaPool pool; + ArenaStack arena_stack(&pool); + ScopedArenaAllocator allocator(&arena_stack); + StackMapStream stack_maps(&allocator, kRuntimeISA); + stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u); + stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset); + stack_maps.EndStackMapEntry(); + stack_maps.EndMethod(); + ScopedArenaVector stack_map = stack_maps.Encode(); + + const size_t stack_maps_size = stack_map.size(); + const size_t header_size = sizeof(OatQuickMethodHeader); + const size_t code_alignment = GetInstructionSetAlignment(kRuntimeISA); + + fake_header_code_and_maps_.resize(stack_maps_size + header_size + code_size + code_alignment); + // NB: The start of the vector might not have been allocated the desired alignment. 
+ uint8_t* code_ptr = + AlignUp(&fake_header_code_and_maps_[stack_maps_size + header_size], code_alignment); + + memcpy(&fake_header_code_and_maps_[0], stack_map.data(), stack_maps_size); + OatQuickMethodHeader method_header(code_ptr - fake_header_code_and_maps_.data(), code_size); + static_assert(std::is_trivially_copyable::value, "Cannot use memcpy"); + memcpy(code_ptr - header_size, &method_header, header_size); + memcpy(code_ptr, fake_code_.data(), fake_code_.size()); + + if (kRuntimeISA == InstructionSet::kArm) { + // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer(). + CHECK_ALIGNED(code_ptr, 2); + } + + method_f_ = my_klass_->FindClassMethod("f", "()I", kRuntimePointerSize); + ASSERT_TRUE(method_f_ != nullptr); + ASSERT_FALSE(method_f_->IsDirect()); + method_f_->SetEntryPointFromQuickCompiledCode(code_ptr); + + method_g_ = my_klass_->FindClassMethod("g", "(I)V", kRuntimePointerSize); + ASSERT_TRUE(method_g_ != nullptr); + ASSERT_FALSE(method_g_->IsDirect()); + method_g_->SetEntryPointFromQuickCompiledCode(code_ptr); + } + + const DexFile* dex_; + + std::vector fake_code_; + std::vector fake_header_code_and_maps_; + + ArtMethod* method_f_; + ArtMethod* method_g_; + + private: + ObjPtr my_klass_; +}; + +TEST_F(ExceptionTest, FindCatchHandler) { + ScopedObjectAccess soa(Thread::Current()); + CodeItemDataAccessor accessor(*dex_, dex_->GetCodeItem(method_f_->GetCodeItemOffset())); + + ASSERT_TRUE(accessor.HasCodeItem()); + + ASSERT_EQ(2u, accessor.TriesSize()); + ASSERT_NE(0u, accessor.InsnsSizeInCodeUnits()); + + const dex::TryItem& t0 = accessor.TryItems().begin()[0]; + const dex::TryItem& t1 = accessor.TryItems().begin()[1]; + EXPECT_LE(t0.start_addr_, t1.start_addr_); + { + CatchHandlerIterator iter(accessor, 4 /* Dex PC in the first try block */); + EXPECT_STREQ("Ljava/io/IOException;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); + ASSERT_TRUE(iter.HasNext()); + iter.Next(); + EXPECT_STREQ("Ljava/lang/Exception;", 
dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); + ASSERT_TRUE(iter.HasNext()); + iter.Next(); + EXPECT_FALSE(iter.HasNext()); + } + { + CatchHandlerIterator iter(accessor, 8 /* Dex PC in the second try block */); + EXPECT_STREQ("Ljava/io/IOException;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); + ASSERT_TRUE(iter.HasNext()); + iter.Next(); + EXPECT_FALSE(iter.HasNext()); + } + { + CatchHandlerIterator iter(accessor, 11 /* Dex PC not in any try block */); + EXPECT_FALSE(iter.HasNext()); + } +} + +TEST_F(ExceptionTest, StackTraceElement) { + Thread* thread = Thread::Current(); + thread->TransitionFromSuspendedToRunnable(); + bool started = runtime_->Start(); + CHECK(started); + JNIEnv* env = thread->GetJniEnv(); + ScopedObjectAccess soa(env); + + std::vector fake_stack; + Runtime* r = Runtime::Current(); + r->SetInstructionSet(kRuntimeISA); + ArtMethod* save_method = r->CreateCalleeSaveMethod(); + r->SetCalleeSaveMethod(save_method, CalleeSaveType::kSaveAllCalleeSaves); + QuickMethodFrameInfo frame_info = r->GetRuntimeMethodFrameInfo(save_method); + + ASSERT_EQ(kStackAlignment, 16U); + // ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t)); + + // Create the stack frame for the callee save method, expected by the runtime. 
+ fake_stack.push_back(reinterpret_cast(save_method)); + for (size_t i = 0; i < frame_info.FrameSizeInBytes() - 2 * sizeof(uintptr_t); + i += sizeof(uintptr_t)) { + fake_stack.push_back(0); + } + + fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc( + method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc + + // Create/push fake 16byte stack frame for method g + fake_stack.push_back(reinterpret_cast(method_g_)); + fake_stack.push_back(0); + fake_stack.push_back(0); + fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc( + method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc + + // Create/push fake 16byte stack frame for method f + fake_stack.push_back(reinterpret_cast(method_f_)); + fake_stack.push_back(0); + fake_stack.push_back(0); + fake_stack.push_back(0xEBAD6070); // return pc + + // Push Method* of null to terminate the trace + fake_stack.push_back(0); + + // Push null values which will become null incoming arguments. + fake_stack.push_back(0); + fake_stack.push_back(0); + fake_stack.push_back(0); + + // Set up thread to appear as if we called out of method_g_ at given pc dex. 
+ thread->SetTopOfStack(reinterpret_cast(&fake_stack[0])); + + jobject internal = thread->CreateInternalStackTrace(soa); + ASSERT_TRUE(internal != nullptr); + jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal); + ASSERT_TRUE(ste_array != nullptr); + auto trace_array = soa.Decode>(ste_array); + + ASSERT_TRUE(trace_array != nullptr); + ASSERT_TRUE(trace_array->Get(0) != nullptr); + EXPECT_STREQ("ExceptionHandle", + trace_array->Get(0)->GetDeclaringClass()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("ExceptionHandle.java", + trace_array->Get(0)->GetFileName()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("g", trace_array->Get(0)->GetMethodName()->ToModifiedUtf8().c_str()); + EXPECT_EQ(36, trace_array->Get(0)->GetLineNumber()); + + ASSERT_TRUE(trace_array->Get(1) != nullptr); + EXPECT_STREQ("ExceptionHandle", + trace_array->Get(1)->GetDeclaringClass()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("ExceptionHandle.java", + trace_array->Get(1)->GetFileName()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("f", trace_array->Get(1)->GetMethodName()->ToModifiedUtf8().c_str()); + EXPECT_EQ(22, trace_array->Get(1)->GetLineNumber()); + + thread->SetTopOfStack(nullptr); // Disarm the assertion that no code is running when we detach. +} + +} // namespace art diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc new file mode 100644 index 0000000..0f12457 --- /dev/null +++ b/compiler/jit/jit_compiler.cc @@ -0,0 +1,206 @@ +/* + * Copyright 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jit_compiler.h" + +#include "android-base/stringprintf.h" + +#include "arch/instruction_set.h" +#include "arch/instruction_set_features.h" +#include "art_method-inl.h" +#include "base/logging.h" // For VLOG +#include "base/string_view_cpp20.h" +#include "base/systrace.h" +#include "base/time_utils.h" +#include "base/timing_logger.h" +#include "compiler.h" +#include "debug/elf_debug_writer.h" +#include "driver/compiler_options.h" +#include "jit/debugger_interface.h" +#include "jit/jit.h" +#include "jit/jit_code_cache.h" +#include "jit/jit_logger.h" + +namespace art { +namespace jit { + +JitCompiler* JitCompiler::Create() { + return new JitCompiler(); +} + +void JitCompiler::ParseCompilerOptions() { + // Special case max code units for inlining, whose default is "unset" (implictly + // meaning no limit). Do this before parsing the actual passed options. + compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits); + Runtime* runtime = Runtime::Current(); + { + std::string error_msg; + if (!compiler_options_->ParseCompilerOptions(runtime->GetCompilerOptions(), + /*ignore_unrecognized=*/ true, + &error_msg)) { + LOG(FATAL) << error_msg; + UNREACHABLE(); + } + } + // JIT is never PIC, no matter what the runtime compiler options specify. + compiler_options_->SetNonPic(); + + // If the options don't provide whether we generate debuggable code, set + // debuggability based on the runtime value. 
+ if (!compiler_options_->GetDebuggable()) { + compiler_options_->SetDebuggable(runtime->IsJavaDebuggable()); + } + + const InstructionSet instruction_set = compiler_options_->GetInstructionSet(); + if (kRuntimeISA == InstructionSet::kArm) { + DCHECK_EQ(instruction_set, InstructionSet::kThumb2); + } else { + DCHECK_EQ(instruction_set, kRuntimeISA); + } + std::unique_ptr instruction_set_features; + for (const std::string& option : runtime->GetCompilerOptions()) { + VLOG(compiler) << "JIT compiler option " << option; + std::string error_msg; + if (StartsWith(option, "--instruction-set-variant=")) { + const char* str = option.c_str() + strlen("--instruction-set-variant="); + VLOG(compiler) << "JIT instruction set variant " << str; + instruction_set_features = InstructionSetFeatures::FromVariant( + instruction_set, str, &error_msg); + if (instruction_set_features == nullptr) { + LOG(WARNING) << "Error parsing " << option << " message=" << error_msg; + } + } else if (StartsWith(option, "--instruction-set-features=")) { + const char* str = option.c_str() + strlen("--instruction-set-features="); + VLOG(compiler) << "JIT instruction set features " << str; + if (instruction_set_features == nullptr) { + instruction_set_features = InstructionSetFeatures::FromVariant( + instruction_set, "default", &error_msg); + if (instruction_set_features == nullptr) { + LOG(WARNING) << "Error parsing " << option << " message=" << error_msg; + } + } + instruction_set_features = + instruction_set_features->AddFeaturesFromString(str, &error_msg); + if (instruction_set_features == nullptr) { + LOG(WARNING) << "Error parsing " << option << " message=" << error_msg; + } + } + } + + if (instruction_set_features == nullptr) { + // '--instruction-set-features/--instruction-set-variant' were not used. + // Use build-time defined features. 
+ instruction_set_features = InstructionSetFeatures::FromCppDefines(); + } + compiler_options_->instruction_set_features_ = std::move(instruction_set_features); + compiler_options_->compiling_with_core_image_ = + CompilerOptions::IsCoreImageFilename(runtime->GetImageLocation()); + + if (compiler_options_->GetGenerateDebugInfo()) { + jit_logger_.reset(new JitLogger()); + jit_logger_->OpenLog(); + } +} + +extern "C" JitCompilerInterface* jit_load() { + VLOG(jit) << "Create jit compiler"; + auto* const jit_compiler = JitCompiler::Create(); + CHECK(jit_compiler != nullptr); + VLOG(jit) << "Done creating jit compiler"; + return jit_compiler; +} + +void JitCompiler::TypesLoaded(mirror::Class** types, size_t count) { + const CompilerOptions& compiler_options = GetCompilerOptions(); + if (compiler_options.GetGenerateDebugInfo()) { + InstructionSet isa = compiler_options.GetInstructionSet(); + const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures(); + const ArrayRef types_array(types, count); + std::vector elf_file = + debug::WriteDebugElfFileForClasses(isa, features, types_array); + + // NB: Don't allow packing since it would remove non-backtrace data. 
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_); + AddNativeDebugInfoForJit(/*code_ptr=*/ nullptr, elf_file, /*allow_packing=*/ false); + } +} + +bool JitCompiler::GenerateDebugInfo() { + return GetCompilerOptions().GetGenerateDebugInfo(); +} + +std::vector JitCompiler::PackElfFileForJIT(ArrayRef elf_files, + ArrayRef removed_symbols, + bool compress, + /*out*/ size_t* num_symbols) { + return debug::PackElfFileForJIT(elf_files, removed_symbols, compress, num_symbols); +} + +JitCompiler::JitCompiler() { + compiler_options_.reset(new CompilerOptions()); + ParseCompilerOptions(); + compiler_.reset( + Compiler::Create(*compiler_options_, /*storage=*/ nullptr, Compiler::kOptimizing)); +} + +JitCompiler::~JitCompiler() { + if (compiler_options_->GetGenerateDebugInfo()) { + jit_logger_->CloseLog(); + } +} + +bool JitCompiler::CompileMethod( + Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr) { + SCOPED_TRACE << "JIT compiling " + << method->PrettyMethod() + << " (baseline=" << baseline << ", osr=" << osr << ")"; + + DCHECK(!method->IsProxyMethod()); + DCHECK(method->GetDeclaringClass()->IsResolved()); + + TimingLogger logger( + "JIT compiler timing logger", true, VLOG_IS_ON(jit), TimingLogger::TimingKind::kThreadCpu); + self->AssertNoPendingException(); + Runtime* runtime = Runtime::Current(); + + // Do the compilation. + bool success = false; + { + TimingLogger::ScopedTiming t2("Compiling", &logger); + JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache(); + uint64_t start_ns = NanoTime(); + success = compiler_->JitCompile( + self, code_cache, region, method, baseline, osr, jit_logger_.get()); + uint64_t duration_ns = NanoTime() - start_ns; + VLOG(jit) << "Compilation of " + << method->PrettyMethod() + << " took " + << PrettyDuration(duration_ns); + } + + // Trim maps to reduce memory usage. + // TODO: move this to an idle phase. 
+ { + TimingLogger::ScopedTiming t2("TrimMaps", &logger); + runtime->GetJitArenaPool()->TrimMaps(); + } + + runtime->GetJit()->AddTimingLogger(logger); + return success; +} + +} // namespace jit +} // namespace art diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h new file mode 100644 index 0000000..09de1f8 --- /dev/null +++ b/compiler/jit/jit_compiler.h @@ -0,0 +1,74 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_JIT_JIT_COMPILER_H_ +#define ART_COMPILER_JIT_JIT_COMPILER_H_ + +#include "base/mutex.h" + +#include "jit/jit.h" + +namespace art { + +class ArtMethod; +class Compiler; +class CompilerOptions; +class Thread; + +namespace jit { + +class JitLogger; +class JitMemoryRegion; + +class JitCompiler : public JitCompilerInterface { + public: + static JitCompiler* Create(); + virtual ~JitCompiler(); + + // Compilation entrypoint. Returns whether the compilation succeeded. 
+ bool CompileMethod( + Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr) + REQUIRES_SHARED(Locks::mutator_lock_) override; + + const CompilerOptions& GetCompilerOptions() const { + return *compiler_options_.get(); + } + + bool GenerateDebugInfo() override; + + void ParseCompilerOptions() override; + + void TypesLoaded(mirror::Class**, size_t count) REQUIRES_SHARED(Locks::mutator_lock_) override; + + std::vector PackElfFileForJIT(ArrayRef elf_files, + ArrayRef removed_symbols, + bool compress, + /*out*/ size_t* num_symbols) override; + + private: + std::unique_ptr compiler_options_; + std::unique_ptr compiler_; + std::unique_ptr jit_logger_; + + JitCompiler(); + + DISALLOW_COPY_AND_ASSIGN(JitCompiler); +}; + +} // namespace jit +} // namespace art + +#endif // ART_COMPILER_JIT_JIT_COMPILER_H_ diff --git a/compiler/jit/jit_logger.cc b/compiler/jit/jit_logger.cc new file mode 100644 index 0000000..6b9453f --- /dev/null +++ b/compiler/jit/jit_logger.cc @@ -0,0 +1,308 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "jit_logger.h" + +#include "arch/instruction_set.h" +#include "art_method-inl.h" +#include "base/time_utils.h" +#include "base/unix_file/fd_file.h" +#include "jit/jit.h" +#include "jit/jit_code_cache.h" +#include "oat_file-inl.h" + +namespace art { +namespace jit { + +#ifdef ART_TARGET_ANDROID +static const char* kLogPrefix = "/data/misc/trace"; +#else +static const char* kLogPrefix = "/tmp"; +#endif + +// File format of perf-PID.map: +// +---------------------+ +// |ADDR SIZE symbolname1| +// |ADDR SIZE symbolname2| +// |... | +// +---------------------+ +void JitLogger::OpenPerfMapLog() { + std::string pid_str = std::to_string(getpid()); + std::string perf_filename = std::string(kLogPrefix) + "/perf-" + pid_str + ".map"; + perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str())); + if (perf_file_ == nullptr) { + LOG(ERROR) << "Could not create perf file at " << perf_filename << + " Are you on a user build? Perf only works on userdebug/eng builds"; + } +} + +void JitLogger::WritePerfMapLog(const void* ptr, size_t code_size, ArtMethod* method) { + if (perf_file_ != nullptr) { + std::string method_name = method->PrettyMethod(); + + std::ostringstream stream; + stream << std::hex + << reinterpret_cast(ptr) + << " " + << code_size + << " " + << method_name + << std::endl; + std::string str = stream.str(); + bool res = perf_file_->WriteFully(str.c_str(), str.size()); + if (!res) { + LOG(WARNING) << "Failed to write jitted method info in log: write failure."; + } + } else { + LOG(WARNING) << "Failed to write jitted method info in log: log file doesn't exist."; + } +} + +void JitLogger::ClosePerfMapLog() { + if (perf_file_ != nullptr) { + UNUSED(perf_file_->Flush()); + UNUSED(perf_file_->Close()); + } +} + +// File format of jit-PID.jump: +// +// +--------------------------------+ +// | PerfJitHeader | +// +--------------------------------+ +// | PerfJitCodeLoad { | . +// | struct PerfJitBase; | . +// | uint32_t process_id_; | . 
+// | uint32_t thread_id_; | . +// | uint64_t vma_; | . +// | uint64_t code_address_; | . +// | uint64_t code_size_; | . +// | uint64_t code_id_; | . +// | } | . +// +- -+ . +// | method_name'\0' | +--> one jitted method +// +- -+ . +// | jitted code binary | . +// | ... | . +// +--------------------------------+ . +// | PerfJitCodeDebugInfo { | . +// | struct PerfJitBase; | . +// | uint64_t address_; | . +// | uint64_t entry_count_; | . +// | struct PerfJitDebugEntry; | . +// | } | . +// +--------------------------------+ +// | PerfJitCodeLoad | +// ... +// +struct PerfJitHeader { + uint32_t magic_; // Characters "JiTD" + uint32_t version_; // Header version + uint32_t size_; // Total size of header + uint32_t elf_mach_target_; // Elf mach target + uint32_t reserved_; // Reserved, currently not used + uint32_t process_id_; // Process ID of the JIT compiler + uint64_t time_stamp_; // Timestamp when the header is generated + uint64_t flags_; // Currently the flags are only used for choosing clock for timestamp, + // we set it to 0 to tell perf that we use CLOCK_MONOTONIC clock. + static const uint32_t kMagic = 0x4A695444; // "JiTD" + static const uint32_t kVersion = 1; +}; + +// Each record starts with such basic information: event type, total size, and timestamp. +struct PerfJitBase { + enum PerfJitEvent { + // A jitted code load event. + // In ART JIT, it is used to log a new method is jit compiled and committed to jit-code-cache. + // Note that such kLoad event supports code cache GC in ART JIT. + // For every kLoad event recorded in jit-PID.dump and every perf sample recorded in perf.data, + // each event/sample has time stamp. In case code cache GC happens in ART JIT, and a new + // jitted method is committed to the same address of a previously deleted method, + // the time stamp information can help profiler to tell whether this sample belongs to the + // era of the first jitted method, or does it belong to the period of the second jitted method. 
+ // JitCodeCache doesn't have to record any event on 'code delete'. + kLoad = 0, + + // A jitted code move event, i.e. a jitted code moved from one address to another address. + // It helps profiler to map samples to the right symbol even when the code is moved. + // In ART JIT, this event can help log such behavior: + // A jitted method is recorded in previous kLoad event, but due to some reason, + // it is moved to another address in jit-code-cache. + kMove = 1, + + // Logs debug line/column information. + kDebugInfo = 2, + + // Logs JIT VM end of life event. + kClose = 3 + }; + uint32_t event_; // Must be one of the events defined in PerfJitEvent. + uint32_t size_; // Total size of this event record. + // For example, for kLoad event, size of the event record is: + // sizeof(PerfJitCodeLoad) + method_name.size() + compiled code size. + uint64_t time_stamp_; // Timestamp for the event. +}; + +// Logs a jitted code load event (kLoad). +// In ART JIT, it is used to log a new method is jit compiled and committed to jit-code-cache. +struct PerfJitCodeLoad : PerfJitBase { + uint32_t process_id_; // Process ID who performs the jit code load. + // In ART JIT, it is the pid of the JIT compiler. + uint32_t thread_id_; // Thread ID who performs the jit code load. + // In ART JIT, it is the tid of the JIT compiler. + uint64_t vma_; // Address of the code section. In ART JIT, because code_address_ + // uses absolute address, this field is 0. + uint64_t code_address_; // Address where the jitted code is loaded. + uint64_t code_size_; // Size of the jitted code. + uint64_t code_id_; // Unique ID for each jitted code. +}; + +// This structure is for source line/column mapping. +// Currently this feature is not implemented in ART JIT yet. +struct PerfJitDebugEntry { + uint64_t address_; // Code address which maps to the line/column in source. + uint32_t line_number_; // Source line number starting at 1. + uint32_t column_; // Column discriminator, default 0. 
+ const char name_[0]; // Followed by null-terminated name or \0xff\0 if same as previous. +}; + +// Logs debug line information (kDebugInfo). +// This structure is for source line/column mapping. +// Currently this feature is not implemented in ART JIT yet. +struct PerfJitCodeDebugInfo : PerfJitBase { + uint64_t address_; // Starting code address which the debug info describes. + uint64_t entry_count_; // How many instances of PerfJitDebugEntry. + PerfJitDebugEntry entries_[0]; // Followed by entry_count_ instances of PerfJitDebugEntry. +}; + +static uint32_t GetElfMach() { +#if defined(__arm__) + static const uint32_t kElfMachARM = 0x28; + return kElfMachARM; +#elif defined(__aarch64__) + static const uint32_t kElfMachARM64 = 0xB7; + return kElfMachARM64; +#elif defined(__i386__) + static const uint32_t kElfMachIA32 = 0x3; + return kElfMachIA32; +#elif defined(__x86_64__) + static const uint32_t kElfMachX64 = 0x3E; + return kElfMachX64; +#else + UNIMPLEMENTED(WARNING) << "Unsupported architecture in JitLogger"; + return 0; +#endif +} + +void JitLogger::OpenMarkerFile() { + int fd = jit_dump_file_->Fd(); + // The 'perf inject' tool requires that the jit-PID.dump file + // must have a mmap(PROT_READ|PROT_EXEC) record in perf.data. + marker_address_ = mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0); + if (marker_address_ == MAP_FAILED) { + LOG(WARNING) << "Failed to create record in perf.data. JITed code profiling will not work."; + return; + } +} + +void JitLogger::CloseMarkerFile() { + if (marker_address_ != nullptr) { + munmap(marker_address_, kPageSize); + } +} + +void JitLogger::WriteJitDumpDebugInfo() { + // In the future, we can add java source file line/column mapping here. 
+} + +void JitLogger::WriteJitDumpHeader() { + PerfJitHeader header; + + std::memset(&header, 0, sizeof(header)); + header.magic_ = PerfJitHeader::kMagic; + header.version_ = PerfJitHeader::kVersion; + header.size_ = sizeof(header); + header.elf_mach_target_ = GetElfMach(); + header.process_id_ = static_cast(getpid()); + header.time_stamp_ = art::NanoTime(); // CLOCK_MONOTONIC clock is required. + header.flags_ = 0; + + bool res = jit_dump_file_->WriteFully(reinterpret_cast(&header), sizeof(header)); + if (!res) { + LOG(WARNING) << "Failed to write profiling log. The 'perf inject' tool will not work."; + } +} + +void JitLogger::OpenJitDumpLog() { + std::string pid_str = std::to_string(getpid()); + std::string jitdump_filename = std::string(kLogPrefix) + "/jit-" + pid_str + ".dump"; + + jit_dump_file_.reset(OS::CreateEmptyFile(jitdump_filename.c_str())); + if (jit_dump_file_ == nullptr) { + LOG(ERROR) << "Could not create jit dump file at " << jitdump_filename << + " Are you on a user build? Perf only works on userdebug/eng builds"; + return; + } + + OpenMarkerFile(); + + // Continue to write jit-PID.dump file even if the above OpenMarkerFile() fails. + // Even if that means 'perf inject' tool cannot work, developers can still use other tools + // to map the samples in perf.data to the information (symbol,address,code) recorded + // in the jit-PID.dump file, and still proceed with the jitted code analysis. + WriteJitDumpHeader(); +} + +void JitLogger::WriteJitDumpLog(const void* ptr, size_t code_size, ArtMethod* method) { + if (jit_dump_file_ != nullptr) { + std::string method_name = method->PrettyMethod(); + + PerfJitCodeLoad jit_code; + std::memset(&jit_code, 0, sizeof(jit_code)); + jit_code.event_ = PerfJitCodeLoad::kLoad; + jit_code.size_ = sizeof(jit_code) + method_name.size() + 1 + code_size; + jit_code.time_stamp_ = art::NanoTime(); // CLOCK_MONOTONIC clock is required. 
+ jit_code.process_id_ = static_cast(getpid()); + jit_code.thread_id_ = static_cast(art::GetTid()); + jit_code.vma_ = 0x0; + jit_code.code_address_ = reinterpret_cast(ptr); + jit_code.code_size_ = code_size; + jit_code.code_id_ = code_index_++; + + // Write one complete jitted method info, including: + // - PerfJitCodeLoad structure + // - Method name + // - Complete generated code of this method + // + // Use UNUSED() here to avoid compiler warnings. + UNUSED(jit_dump_file_->WriteFully(reinterpret_cast(&jit_code), sizeof(jit_code))); + UNUSED(jit_dump_file_->WriteFully(method_name.c_str(), method_name.size() + 1)); + UNUSED(jit_dump_file_->WriteFully(ptr, code_size)); + + WriteJitDumpDebugInfo(); + } +} + +void JitLogger::CloseJitDumpLog() { + if (jit_dump_file_ != nullptr) { + CloseMarkerFile(); + UNUSED(jit_dump_file_->Flush()); + UNUSED(jit_dump_file_->Close()); + } +} + +} // namespace jit +} // namespace art diff --git a/compiler/jit/jit_logger.h b/compiler/jit/jit_logger.h new file mode 100644 index 0000000..f4ef75a --- /dev/null +++ b/compiler/jit/jit_logger.h @@ -0,0 +1,138 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_JIT_JIT_LOGGER_H_ +#define ART_COMPILER_JIT_JIT_LOGGER_H_ + +#include + +#include "base/mutex.h" +#include "base/os.h" +#include "compiled_method.h" + +namespace art { + +class ArtMethod; + +namespace jit { + +// +// JitLogger supports two approaches of perf profiling. +// +// (1) perf-map: +// The perf-map mechanism generates perf-PID.map file, +// which provides simple "address, size, method_name" information to perf, +// and allows perf to map samples in jit-code-cache to jitted method symbols. +// +// Command line Example: +// $ perf record dalvikvm -Xcompiler-option --generate-debug-info -cp Test +// $ perf report +// NOTE: +// - Make sure that the perf-PID.map file is available for 'perf report' tool to access, +// so that jitted method can be displayed. +// +// +// (2) perf-inject: +// The perf-inject mechanism generates jit-PID.dump file, +// which provides rich information about a jitted method. +// It allows perf or other profiling tools to do advanced analysis on jitted code, +// for example instruction level profiling. +// +// Command line Example: +// $ perf record -k mono dalvikvm -Xcompiler-option --generate-debug-info -cp Test +// $ perf inject -i perf.data -o perf.data.jitted +// $ perf report -i perf.data.jitted +// $ perf annotate -i perf.data.jitted +// NOTE: +// REQUIREMENTS +// - The 'perf record -k mono' option requires 4.1 (or higher) Linux kernel. +// - The 'perf inject' (generating jit ELF files feature) requires perf 4.6 (or higher). +// PERF RECORD +// - The '-k mono' option tells 'perf record' to use CLOCK_MONOTONIC clock during sampling; +// which is required by 'perf inject', to make sure that both perf.data and jit-PID.dump +// have unified clock source for timestamps. +// PERF INJECT +// - The 'perf inject' tool injects information from jit-PID.dump into perf.data file, +// and generates small ELF files (jitted-TID-CODEID.so) for each jitted method. 
+// - On Android devices, the jit-PID.dump file is generated in /data/misc/trace/ folder, and +// such location is recorded in perf.data file. +// The 'perf inject' tool is going to look for jit-PID.dump and generates small ELF files in +// this /data/misc/trace/ folder. +// Make sure that you have the read/write access to /data/misc/trace/ folder. +// - On non-Android devices, the jit-PID.dump file is generated in /tmp/ folder, and +// 'perf inject' tool operates on this folder. +// Make sure that you have the read/write access to /tmp/ folder. +// - If you are executing 'perf inject' on non-Android devices (host), but perf.data and +// jit-PID.dump files are adb-pulled from Android devices, make sure that there is a +// /data/misc/trace/ folder on host, and jit-PID.dump file is copied to this folder. +// - Currently 'perf inject' doesn't provide option to change the path for jit-PID.dump and +// generated ELF files. +// PERF ANNOTATE +// - The 'perf annotate' tool displays assembly level profiling report. +// Source code can also be displayed if the ELF file has debug symbols. +// - Make sure above small ELF files are available for 'perf annotate' tool to access, +// so that jitted code can be displayed in assembly view. 
+// +class JitLogger { + public: + JitLogger() : code_index_(0), marker_address_(nullptr) {} + + void OpenLog() { + OpenPerfMapLog(); + OpenJitDumpLog(); + } + + void WriteLog(const void* ptr, size_t code_size, ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_) { + WritePerfMapLog(ptr, code_size, method); + WriteJitDumpLog(ptr, code_size, method); + } + + void CloseLog() { + ClosePerfMapLog(); + CloseJitDumpLog(); + } + + private: + // For perf-map profiling + void OpenPerfMapLog(); + void WritePerfMapLog(const void* ptr, size_t code_size, ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); + void ClosePerfMapLog(); + + // For perf-inject profiling + void OpenJitDumpLog(); + void WriteJitDumpLog(const void* ptr, size_t code_size, ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); + void CloseJitDumpLog(); + + void OpenMarkerFile(); + void CloseMarkerFile(); + void WriteJitDumpHeader(); + void WriteJitDumpDebugInfo(); + + std::unique_ptr perf_file_; + std::unique_ptr jit_dump_file_; + uint64_t code_index_; + void* marker_address_; + + DISALLOW_COPY_AND_ASSIGN(JitLogger); +}; + +} // namespace jit +} // namespace art + +#endif // ART_COMPILER_JIT_JIT_LOGGER_H_ diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc new file mode 100644 index 0000000..cec94c9 --- /dev/null +++ b/compiler/jni/jni_cfi_test.cc @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "arch/instruction_set.h" +#include "base/arena_allocator.h" +#include "base/enums.h" +#include "base/malloc_arena_pool.h" +#include "cfi_test.h" +#include "gtest/gtest.h" +#include "jni/quick/calling_convention.h" +#include "read_barrier_config.h" +#include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" + +#include "jni/jni_cfi_test_expected.inc" + +namespace art { + +// Run the tests only on host. +#ifndef ART_TARGET_ANDROID + +class JNICFITest : public CFITest { + public: + // Enable this flag to generate the expected outputs. + static constexpr bool kGenerateExpected = false; + + void TestImpl(InstructionSet isa, + const char* isa_str, + const std::vector& expected_asm, + const std::vector& expected_cfi) { + if (Is64BitInstructionSet(isa)) { + TestImplSized(isa, isa_str, expected_asm, expected_cfi); + } else { + TestImplSized(isa, isa_str, expected_asm, expected_cfi); + } + } + + private: + template + void TestImplSized(InstructionSet isa, + const char* isa_str, + const std::vector& expected_asm, + const std::vector& expected_cfi) { + // Description of simple method. + const bool is_static = true; + const bool is_synchronized = false; + const char* shorty = "IIFII"; + + MallocArenaPool pool; + ArenaAllocator allocator(&pool); + + std::unique_ptr jni_conv( + JniCallingConvention::Create(&allocator, + is_static, + is_synchronized, + /*is_critical_native*/false, + shorty, + isa)); + std::unique_ptr mr_conv( + ManagedRuntimeCallingConvention::Create( + &allocator, is_static, is_synchronized, shorty, isa)); + const int frame_size(jni_conv->FrameSize()); + ArrayRef callee_save_regs = jni_conv->CalleeSaveRegisters(); + + // Assemble the method. 
+ std::unique_ptr> jni_asm( + JNIMacroAssembler::Create(&allocator, isa)); + jni_asm->cfi().SetEnabled(true); + jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(), + callee_save_regs, mr_conv->EntrySpills()); + jni_asm->IncreaseFrameSize(32); + jni_asm->DecreaseFrameSize(32); + jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true); + jni_asm->FinalizeCode(); + std::vector actual_asm(jni_asm->CodeSize()); + MemoryRegion code(&actual_asm[0], actual_asm.size()); + jni_asm->FinalizeInstructions(code); + ASSERT_EQ(jni_asm->cfi().GetCurrentCFAOffset(), frame_size); + const std::vector& actual_cfi = *(jni_asm->cfi().data()); + + if (kGenerateExpected) { + GenerateExpected(stdout, + isa, + isa_str, + ArrayRef(actual_asm), + ArrayRef(actual_cfi)); + } else { + EXPECT_EQ(expected_asm, actual_asm); + EXPECT_EQ(expected_cfi, actual_cfi); + } + } +}; + +#define TEST_ISA(isa) \ + TEST_F(JNICFITest, isa) { \ + std::vector expected_asm(expected_asm_##isa, \ + expected_asm_##isa + arraysize(expected_asm_##isa)); \ + std::vector expected_cfi(expected_cfi_##isa, \ + expected_cfi_##isa + arraysize(expected_cfi_##isa)); \ + TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \ + } + +#ifdef ART_ENABLE_CODEGEN_arm +// Run the tests for ARM only with Baker read barriers, as the +// expected generated code contains a Marking Register refresh +// instruction. +#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) +TEST_ISA(kThumb2) +#endif +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 +// Run the tests for ARM64 only with Baker read barriers, as the +// expected generated code contains a Marking Register refresh +// instruction. 
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) +TEST_ISA(kArm64) +#endif +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 +TEST_ISA(kX86) +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 +TEST_ISA(kX86_64) +#endif + +#endif // ART_TARGET_ANDROID + +} // namespace art diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc new file mode 100644 index 0000000..489ae00 --- /dev/null +++ b/compiler/jni/jni_cfi_test_expected.inc @@ -0,0 +1,330 @@ +static constexpr uint8_t expected_asm_kThumb2[] = { + 0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90, + 0x21, 0x91, 0x8D, 0xED, 0x22, 0x0A, 0x23, 0x92, 0x24, 0x93, 0x88, 0xB0, + 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x4D, + 0xD9, 0xF8, 0x34, 0x80, 0x70, 0x47, +}; +static constexpr uint8_t expected_cfi_kThumb2[] = { + 0x44, 0x0E, 0x1C, 0x85, 0x07, 0x86, 0x06, 0x87, 0x05, 0x88, 0x04, 0x8A, + 0x03, 0x8B, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x5C, 0x05, 0x50, 0x17, 0x05, + 0x51, 0x16, 0x05, 0x52, 0x15, 0x05, 0x53, 0x14, 0x05, 0x54, 0x13, 0x05, + 0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05, + 0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05, + 0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01, + 0x4E, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C, + 0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06, + 0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06, + 0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x4A, + 0x0B, 0x0E, 0x80, 0x01, +}; +// 0x00000000: push {r5,r6,r7,r8,r10,r11,lr} +// 0x00000004: .cfi_def_cfa_offset: 28 +// 0x00000004: .cfi_offset: r5 at cfa-28 +// 0x00000004: .cfi_offset: r6 at cfa-24 +// 0x00000004: .cfi_offset: r7 at cfa-20 +// 0x00000004: .cfi_offset: r8 at cfa-16 +// 0x00000004: .cfi_offset: r10 at cfa-12 +// 0x00000004: .cfi_offset: r11 at cfa-8 +// 0x00000004: .cfi_offset: r14 
at cfa-4 +// 0x00000004: vpush {s16-s31} +// 0x00000008: .cfi_def_cfa_offset: 92 +// 0x00000008: .cfi_offset_extended: r80 at cfa-92 +// 0x00000008: .cfi_offset_extended: r81 at cfa-88 +// 0x00000008: .cfi_offset_extended: r82 at cfa-84 +// 0x00000008: .cfi_offset_extended: r83 at cfa-80 +// 0x00000008: .cfi_offset_extended: r84 at cfa-76 +// 0x00000008: .cfi_offset_extended: r85 at cfa-72 +// 0x00000008: .cfi_offset_extended: r86 at cfa-68 +// 0x00000008: .cfi_offset_extended: r87 at cfa-64 +// 0x00000008: .cfi_offset_extended: r88 at cfa-60 +// 0x00000008: .cfi_offset_extended: r89 at cfa-56 +// 0x00000008: .cfi_offset_extended: r90 at cfa-52 +// 0x00000008: .cfi_offset_extended: r91 at cfa-48 +// 0x00000008: .cfi_offset_extended: r92 at cfa-44 +// 0x00000008: .cfi_offset_extended: r93 at cfa-40 +// 0x00000008: .cfi_offset_extended: r94 at cfa-36 +// 0x00000008: .cfi_offset_extended: r95 at cfa-32 +// 0x00000008: sub sp, #36 +// 0x0000000a: .cfi_def_cfa_offset: 128 +// 0x0000000a: str r0, [sp] +// 0x0000000c: str r1, [sp, #132] +// 0x0000000e: vstr s0, [sp, #136] +// 0x00000012: str r2, [sp, #140] +// 0x00000014: str r3, [sp, #144] +// 0x00000016: sub sp, #32 +// 0x00000018: .cfi_def_cfa_offset: 160 +// 0x00000018: add sp, #32 +// 0x0000001a: .cfi_def_cfa_offset: 128 +// 0x0000001a: .cfi_remember_state +// 0x0000001a: add sp, #36 +// 0x0000001c: .cfi_def_cfa_offset: 92 +// 0x0000001c: vpop {s16-s31} +// 0x00000020: .cfi_def_cfa_offset: 28 +// 0x00000020: .cfi_restore_extended: r80 +// 0x00000020: .cfi_restore_extended: r81 +// 0x00000020: .cfi_restore_extended: r82 +// 0x00000020: .cfi_restore_extended: r83 +// 0x00000020: .cfi_restore_extended: r84 +// 0x00000020: .cfi_restore_extended: r85 +// 0x00000020: .cfi_restore_extended: r86 +// 0x00000020: .cfi_restore_extended: r87 +// 0x00000020: .cfi_restore_extended: r88 +// 0x00000020: .cfi_restore_extended: r89 +// 0x00000020: .cfi_restore_extended: r90 +// 0x00000020: .cfi_restore_extended: r91 +// 0x00000020: 
.cfi_restore_extended: r92 +// 0x00000020: .cfi_restore_extended: r93 +// 0x00000020: .cfi_restore_extended: r94 +// 0x00000020: .cfi_restore_extended: r95 +// 0x00000020: pop {r5,r6,r7,r8,r10,r11,lr} +// 0x00000024: ldr r8, [tr, #52] ; is_gc_marking +// 0x00000028: bx lr +// 0x0000002a: .cfi_restore_state +// 0x0000002a: .cfi_def_cfa_offset: 128 + +static constexpr uint8_t expected_asm_kArm64[] = { + 0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9, + 0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9, + 0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D, + 0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xE0, 0x03, 0x00, 0xF9, + 0xE1, 0xCB, 0x00, 0xB9, 0xE0, 0xCF, 0x00, 0xBD, 0xE2, 0xD3, 0x00, 0xB9, + 0xE3, 0xD7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91, + 0xF3, 0x53, 0x46, 0xA9, 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9, + 0xF9, 0x6B, 0x49, 0xA9, 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9, + 0xE8, 0x27, 0x42, 0x6D, 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D, + 0xEE, 0x3F, 0x45, 0x6D, 0x74, 0x36, 0x40, 0xB9, 0xFF, 0x03, 0x03, 0x91, + 0xC0, 0x03, 0x5F, 0xD6, +}; +static constexpr uint8_t expected_cfi_kArm64[] = { + 0x44, 0x0E, 0xC0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14, + 0x96, 0x12, 0x44, 0x97, 0x10, 0x98, 0x0E, 0x44, 0x99, 0x0C, 0x9A, 0x0A, + 0x44, 0x9B, 0x08, 0x9C, 0x06, 0x44, 0x9D, 0x04, 0x9E, 0x02, 0x44, 0x05, + 0x48, 0x28, 0x05, 0x49, 0x26, 0x44, 0x05, 0x4A, 0x24, 0x05, 0x4B, 0x22, + 0x44, 0x05, 0x4C, 0x20, 0x05, 0x4D, 0x1E, 0x44, 0x05, 0x4E, 0x1C, 0x05, + 0x4F, 0x1A, 0x58, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x44, + 0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA, 0x44, + 0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44, 0x06, + 0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E, 0x06, + 0x4F, 0x48, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01, +}; +// 0x00000000: sub sp, sp, #0xc0 (192) +// 
0x00000004: .cfi_def_cfa_offset: 192 +// 0x00000004: stp tr, x20, [sp, #96] +// 0x00000008: .cfi_offset: r19 at cfa-96 +// 0x00000008: .cfi_offset: r20 at cfa-88 +// 0x00000008: stp x21, x22, [sp, #112] +// 0x0000000c: .cfi_offset: r21 at cfa-80 +// 0x0000000c: .cfi_offset: r22 at cfa-72 +// 0x0000000c: stp x23, x24, [sp, #128] +// 0x00000010: .cfi_offset: r23 at cfa-64 +// 0x00000010: .cfi_offset: r24 at cfa-56 +// 0x00000010: stp x25, x26, [sp, #144] +// 0x00000014: .cfi_offset: r25 at cfa-48 +// 0x00000014: .cfi_offset: r26 at cfa-40 +// 0x00000014: stp x27, x28, [sp, #160] +// 0x00000018: .cfi_offset: r27 at cfa-32 +// 0x00000018: .cfi_offset: r28 at cfa-24 +// 0x00000018: stp x29, lr, [sp, #176] +// 0x0000001c: .cfi_offset: r29 at cfa-16 +// 0x0000001c: .cfi_offset: r30 at cfa-8 +// 0x0000001c: stp d8, d9, [sp, #32] +// 0x00000020: .cfi_offset_extended: r72 at cfa-160 +// 0x00000020: .cfi_offset_extended: r73 at cfa-152 +// 0x00000020: stp d10, d11, [sp, #48] +// 0x00000024: .cfi_offset_extended: r74 at cfa-144 +// 0x00000024: .cfi_offset_extended: r75 at cfa-136 +// 0x00000024: stp d12, d13, [sp, #64] +// 0x00000028: .cfi_offset_extended: r76 at cfa-128 +// 0x00000028: .cfi_offset_extended: r77 at cfa-120 +// 0x00000028: stp d14, d15, [sp, #80] +// 0x0000002c: .cfi_offset_extended: r78 at cfa-112 +// 0x0000002c: .cfi_offset_extended: r79 at cfa-104 +// 0x0000002c: str x0, [sp] +// 0x00000030: str w1, [sp, #200] +// 0x00000034: str s0, [sp, #204] +// 0x00000038: str w2, [sp, #208] +// 0x0000003c: str w3, [sp, #212] +// 0x00000040: sub sp, sp, #0x20 (32) +// 0x00000044: .cfi_def_cfa_offset: 224 +// 0x00000044: add sp, sp, #0x20 (32) +// 0x00000048: .cfi_def_cfa_offset: 192 +// 0x00000048: .cfi_remember_state +// 0x00000048: ldp tr, x20, [sp, #96] +// 0x0000004c: .cfi_restore: r19 +// 0x0000004c: .cfi_restore: r20 +// 0x0000004c: ldp x21, x22, [sp, #112] +// 0x00000050: .cfi_restore: r21 +// 0x00000050: .cfi_restore: r22 +// 0x00000050: ldp x23, x24, [sp, #128] 
+// 0x00000054: .cfi_restore: r23 +// 0x00000054: .cfi_restore: r24 +// 0x00000054: ldp x25, x26, [sp, #144] +// 0x00000058: .cfi_restore: r25 +// 0x00000058: .cfi_restore: r26 +// 0x00000058: ldp x27, x28, [sp, #160] +// 0x0000005c: .cfi_restore: r27 +// 0x0000005c: .cfi_restore: r28 +// 0x0000005c: ldp x29, lr, [sp, #176] +// 0x00000060: .cfi_restore: r29 +// 0x00000060: .cfi_restore: r30 +// 0x00000060: ldp d8, d9, [sp, #32] +// 0x00000064: .cfi_restore_extended: r72 +// 0x00000064: .cfi_restore_extended: r73 +// 0x00000064: ldp d10, d11, [sp, #48] +// 0x00000068: .cfi_restore_extended: r74 +// 0x00000068: .cfi_restore_extended: r75 +// 0x00000068: ldp d12, d13, [sp, #64] +// 0x0000006c: .cfi_restore_extended: r76 +// 0x0000006c: .cfi_restore_extended: r77 +// 0x0000006c: ldp d14, d15, [sp, #80] +// 0x00000070: .cfi_restore_extended: r78 +// 0x00000070: .cfi_restore_extended: r79 +// 0x00000070: ldr w20, [tr, #52] ; is_gc_marking +// 0x00000074: add sp, sp, #0xc0 (192) +// 0x00000078: .cfi_def_cfa_offset: 0 +// 0x00000078: ret +// 0x0000007c: .cfi_restore_state +// 0x0000007c: .cfi_def_cfa_offset: 192 + +static constexpr uint8_t expected_asm_kX86[] = { + 0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3, + 0x0F, 0x11, 0x44, 0x24, 0x38, 0x89, 0x54, 0x24, 0x3C, 0x89, 0x5C, 0x24, + 0x40, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x20, 0x5D, 0x5E, + 0x5F, 0xC3, +}; +static constexpr uint8_t expected_cfi_kX86[] = { + 0x41, 0x0E, 0x08, 0x87, 0x02, 0x41, 0x0E, 0x0C, 0x86, 0x03, 0x41, 0x0E, + 0x10, 0x85, 0x04, 0x43, 0x0E, 0x2C, 0x41, 0x0E, 0x30, 0x55, 0x0E, 0x50, + 0x43, 0x0E, 0x30, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41, + 0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x30, +}; +// 0x00000000: push edi +// 0x00000001: .cfi_def_cfa_offset: 8 +// 0x00000001: .cfi_offset: r7 at cfa-8 +// 0x00000001: push esi +// 0x00000002: .cfi_def_cfa_offset: 12 +// 0x00000002: .cfi_offset: r6 at cfa-12 +// 0x00000002: push ebp +// 
0x00000003: .cfi_def_cfa_offset: 16 +// 0x00000003: .cfi_offset: r5 at cfa-16 +// 0x00000003: add esp, -28 +// 0x00000006: .cfi_def_cfa_offset: 44 +// 0x00000006: push eax +// 0x00000007: .cfi_def_cfa_offset: 48 +// 0x00000007: mov [esp + 52], ecx +// 0x0000000b: movss [esp + 56], xmm0 +// 0x00000011: mov [esp + 60], edx +// 0x00000015: mov [esp + 64], ebx +// 0x00000019: add esp, -32 +// 0x0000001c: .cfi_def_cfa_offset: 80 +// 0x0000001c: add esp, 32 +// 0x0000001f: .cfi_def_cfa_offset: 48 +// 0x0000001f: .cfi_remember_state +// 0x0000001f: add esp, 32 +// 0x00000022: .cfi_def_cfa_offset: 16 +// 0x00000022: pop ebp +// 0x00000023: .cfi_def_cfa_offset: 12 +// 0x00000023: .cfi_restore: r5 +// 0x00000023: pop esi +// 0x00000024: .cfi_def_cfa_offset: 8 +// 0x00000024: .cfi_restore: r6 +// 0x00000024: pop edi +// 0x00000025: .cfi_def_cfa_offset: 4 +// 0x00000025: .cfi_restore: r7 +// 0x00000025: ret +// 0x00000026: .cfi_restore_state +// 0x00000026: .cfi_def_cfa_offset: 48 + +static constexpr uint8_t expected_asm_kX86_64[] = { + 0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83, + 0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F, + 0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2, + 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x48, 0x89, 0x3C, 0x24, 0x89, 0xB4, + 0x24, 0x88, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x8C, 0x00, + 0x00, 0x00, 0x89, 0x94, 0x24, 0x90, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, + 0x94, 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, + 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C, + 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, + 0x10, 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C, + 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3, +}; +static constexpr uint8_t expected_cfi_kX86_64[] = { + 0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 0x8E, 0x06, 0x42, 0x0E, + 0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 
0x0A, 0x41, 0x0E, 0x30, 0x86, + 0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0, + 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x66, 0x0E, + 0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47, + 0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E, + 0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E, + 0x10, 0xCE, 0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x80, 0x01, +}; +// 0x00000000: push r15 +// 0x00000002: .cfi_def_cfa_offset: 16 +// 0x00000002: .cfi_offset: r15 at cfa-16 +// 0x00000002: push r14 +// 0x00000004: .cfi_def_cfa_offset: 24 +// 0x00000004: .cfi_offset: r14 at cfa-24 +// 0x00000004: push r13 +// 0x00000006: .cfi_def_cfa_offset: 32 +// 0x00000006: .cfi_offset: r13 at cfa-32 +// 0x00000006: push r12 +// 0x00000008: .cfi_def_cfa_offset: 40 +// 0x00000008: .cfi_offset: r12 at cfa-40 +// 0x00000008: push rbp +// 0x00000009: .cfi_def_cfa_offset: 48 +// 0x00000009: .cfi_offset: r6 at cfa-48 +// 0x00000009: push rbx +// 0x0000000a: .cfi_def_cfa_offset: 56 +// 0x0000000a: .cfi_offset: r3 at cfa-56 +// 0x0000000a: subq rsp, 72 +// 0x0000000e: .cfi_def_cfa_offset: 128 +// 0x0000000e: movsd [rsp + 64], xmm15 +// 0x00000015: .cfi_offset: r32 at cfa-64 +// 0x00000015: movsd [rsp + 56], xmm14 +// 0x0000001c: .cfi_offset: r31 at cfa-72 +// 0x0000001c: movsd [rsp + 48], xmm13 +// 0x00000023: .cfi_offset: r30 at cfa-80 +// 0x00000023: movsd [rsp + 40], xmm12 +// 0x0000002a: .cfi_offset: r29 at cfa-88 +// 0x0000002a: movq [rsp], rdi +// 0x0000002e: mov [rsp + 136], esi +// 0x00000035: movss [rsp + 140], xmm0 +// 0x0000003e: mov [rsp + 144], edx +// 0x00000045: mov [rsp + 148], ecx +// 0x0000004c: addq rsp, -32 +// 0x00000050: .cfi_def_cfa_offset: 160 +// 0x00000050: addq rsp, 32 +// 0x00000054: .cfi_def_cfa_offset: 128 +// 0x00000054: .cfi_remember_state +// 0x00000054: movsd xmm12, [rsp + 40] +// 0x0000005b: .cfi_restore: r29 +// 0x0000005b: movsd xmm13, [rsp + 48] +// 
0x00000062: .cfi_restore: r30 +// 0x00000062: movsd xmm14, [rsp + 56] +// 0x00000069: .cfi_restore: r31 +// 0x00000069: movsd xmm15, [rsp + 64] +// 0x00000070: .cfi_restore: r32 +// 0x00000070: addq rsp, 72 +// 0x00000074: .cfi_def_cfa_offset: 56 +// 0x00000074: pop rbx +// 0x00000075: .cfi_def_cfa_offset: 48 +// 0x00000075: .cfi_restore: r3 +// 0x00000075: pop rbp +// 0x00000076: .cfi_def_cfa_offset: 40 +// 0x00000076: .cfi_restore: r6 +// 0x00000076: pop r12 +// 0x00000078: .cfi_def_cfa_offset: 32 +// 0x00000078: .cfi_restore: r12 +// 0x00000078: pop r13 +// 0x0000007a: .cfi_def_cfa_offset: 24 +// 0x0000007a: .cfi_restore: r13 +// 0x0000007a: pop r14 +// 0x0000007c: .cfi_def_cfa_offset: 16 +// 0x0000007c: .cfi_restore: r14 +// 0x0000007c: pop r15 +// 0x0000007e: .cfi_def_cfa_offset: 8 +// 0x0000007e: .cfi_restore: r15 +// 0x0000007e: ret +// 0x0000007f: .cfi_restore_state +// 0x0000007f: .cfi_def_cfa_offset: 128 + diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc new file mode 100644 index 0000000..405c9ec --- /dev/null +++ b/compiler/jni/jni_compiler_test.cc @@ -0,0 +1,2244 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include + +#include "art_method-inl.h" +#include "base/bit_utils.h" +#include "base/mem_map.h" +#include "class_linker.h" +#include "common_compiler_test.h" +#include "compiler.h" +#include "dex/dex_file.h" +#include "gtest/gtest.h" +#include "indirect_reference_table.h" +#include "jni/java_vm_ext.h" +#include "jni/jni_internal.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/stack_trace_element-inl.h" +#include "nativehelper/ScopedLocalRef.h" +#include "nativeloader/native_loader.h" +#include "runtime.h" +#include "scoped_thread_state_change-inl.h" +#include "thread.h" + +extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_bar(JNIEnv*, jobject, jint count) { + return count + 1; +} + +// Note: JNI name mangling "_" -> "_1". +extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_bar_1Fast(JNIEnv*, jobject, jint count) { + return count + 1; +} + +extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar(JNIEnv*, jclass, jint count) { + return count + 1; +} + +// Note: JNI name mangling "_" -> "_1". +extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar_1Fast(JNIEnv*, jclass, jint count) { + return count + 1; +} + +// Note: JNI name mangling "_" -> "_1". +extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar_1Critical(jint count) { + return count + 1; +} + +// TODO: In the Baker read barrier configuration, add checks to ensure +// the Marking Register's value is correct. + +namespace art { + +enum class JniKind { + kNormal, // Regular kind of un-annotated natives. + kFast, // Native method annotated with @FastNative. + kCritical, // Native method annotated with @CriticalNative. + kCount // How many different types of JNIs we can have. +}; + +// Used to initialize array sizes that want to have different state per current jni. 
+static constexpr size_t kJniKindCount = static_cast(JniKind::kCount); +// Do not use directly, use the helpers instead. +uint32_t gCurrentJni = static_cast(JniKind::kNormal); + +// Is the current native method under test @CriticalNative? +static bool IsCurrentJniCritical() { + return gCurrentJni == static_cast(JniKind::kCritical); +} + +// Is the current native method under test @FastNative? +static bool IsCurrentJniFast() { + return gCurrentJni == static_cast(JniKind::kFast); +} + +// Is the current native method a plain-old non-annotated native? +static bool IsCurrentJniNormal() { + return gCurrentJni == static_cast(JniKind::kNormal); +} + +// Signify that a different kind of JNI is about to be tested. +static void UpdateCurrentJni(JniKind kind) { + gCurrentJni = static_cast(kind); +} + +// (Match the name suffixes of native methods in MyClassNatives.java) +static std::string CurrentJniStringSuffix() { + switch (gCurrentJni) { + case static_cast(JniKind::kNormal): { + return ""; + } + case static_cast(JniKind::kFast): { + return "_Fast"; + } + case static_cast(JniKind::kCritical): { + return "_Critical"; + } + default: + LOG(FATAL) << "Invalid current JNI value: " << gCurrentJni; + UNREACHABLE(); + } +} + +// Dummy values passed to our JNI handlers when we enter @CriticalNative. +// Normally @CriticalNative calling convention strips out the "JNIEnv*, jclass" parameters. +// However to avoid duplicating every single test method we have a templated handler +// that inserts dummy parameters (0,1) to make it compatible with a regular JNI handler. +static JNIEnv* const kCriticalDummyJniEnv = reinterpret_cast(0xDEADFEAD); +static jclass const kCriticalDummyJniClass = reinterpret_cast(0xBEAFBEEF); + +// Type trait. Returns true if "T" is the same type as one of the types in Args... +// +// Logically equal to OR(std::same_type for all U in Args). 
+template +struct is_any_of; + +template +struct is_any_of { + using value_type = bool; + static constexpr const bool value = std::is_same::value || is_any_of::value; +}; + +template +struct is_any_of { + using value_type = bool; + static constexpr const bool value = std::is_same::value; +}; + +// Type traits for JNI types. +template +struct jni_type_traits { + // True if type T ends up holding an object reference. False otherwise. + // (Non-JNI types will also be false). + static constexpr const bool is_ref = + is_any_of::value; +}; + +template +struct count_refs_helper { + using value_type = size_t; + static constexpr const size_t value = 0; +}; + +template +struct count_refs_helper { + using value_type = size_t; + static constexpr size_t value = + (jni_type_traits::is_ref ? 1 : 0) + count_refs_helper::value; +}; + +// Base case: No parameters = 0 refs. +size_t count_nonnull_refs_helper() { + return 0; +} + +// SFINAE for ref types. 1 if non-null, 0 otherwise. +template +size_t count_nonnull_refs_single_helper(T arg, + typename std::enable_if::is_ref>::type* + = nullptr) { + return ((arg == NULL) ? 0 : 1); +} + +// SFINAE for non-ref-types. Always 0. +template +size_t count_nonnull_refs_single_helper(T arg ATTRIBUTE_UNUSED, + typename std::enable_if::is_ref>::type* + = nullptr) { + return 0; +} + +// Recursive case. +template +size_t count_nonnull_refs_helper(T arg, Args ... args) { + return count_nonnull_refs_single_helper(arg) + count_nonnull_refs_helper(args...); +} + +// Given any list of parameters, check how many object refs there are and only count +// them if their runtime value is non-null. +// +// For example given (jobject, jint, jclass) we can get (2) if both #0/#2 are non-null, +// (1) if either #0/#2 are null but not both, and (0) if all parameters are null. +// Primitive parameters (including JNIEnv*, if present) are ignored. +template +size_t count_nonnull_refs(Args ... 
args) { + return count_nonnull_refs_helper(args...); +} + +template +struct remove_extra_parameters_helper; + +template +struct remove_extra_parameters_helper { + // Note: Do not use Args&& here to maintain C-style parameter types. + static R apply(Args... args) { + JNIEnv* env = kCriticalDummyJniEnv; + jclass kls = kCriticalDummyJniClass; + return fn(env, kls, args...); + } +}; + +// Given a function 'fn' create a function 'apply' which will omit the JNIEnv/jklass parameters +// +// i.e. if fn(JNIEnv*,jklass,a,b,c,d,e...) then apply(a,b,c,d,e,...) +template +struct jni_remove_extra_parameters : public remove_extra_parameters_helper {}; + +class JniCompilerTest : public CommonCompilerTest { + protected: + void SetUp() override { + CommonCompilerTest::SetUp(); + check_generic_jni_ = false; + } + + void TearDown() override { + android::ResetNativeLoader(); + CommonCompilerTest::TearDown(); + } + + void SetCheckGenericJni(bool generic) { + check_generic_jni_ = generic; + } + + private: + void CompileForTest(jobject class_loader, + bool direct, + const char* method_name, + const char* method_sig) { + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<1> hs(soa.Self()); + Handle loader( + hs.NewHandle(soa.Decode(class_loader))); + // Compile the native method before starting the runtime + ObjPtr c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader); + const auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = c->FindClassMethod(method_name, method_sig, pointer_size); + ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig; + ASSERT_EQ(direct, method->IsDirect()) << method_name << " " << method_sig; + if (check_generic_jni_) { + method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub()); + } else { + const void* code = method->GetEntryPointFromQuickCompiledCode(); + if (code == nullptr || class_linker_->IsQuickGenericJniStub(code)) { + CompileMethod(method); + 
ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) + << method_name << " " << method_sig; + } + } + } + + protected: + void CompileForTestWithCurrentJni(jobject class_loader, + bool direct, + const char* method_name_orig, + const char* method_sig) { + // Append the JNI kind to the method name, so that we automatically get the + // fast or critical versions of the same method. + std::string method_name_str = std::string(method_name_orig) + CurrentJniStringSuffix(); + const char* method_name = method_name_str.c_str(); + + CompileForTest(class_loader, direct, method_name, method_sig); + } + + void SetUpForTest(bool direct, + const char* method_name_orig, + const char* method_sig, + void* native_fnptr) { + // Append the JNI kind to the method name, so that we automatically get the + // fast or critical versions of the same method. + std::string method_name_str = std::string(method_name_orig) + CurrentJniStringSuffix(); + const char* method_name = method_name_str.c_str(); + + // Initialize class loader and compile method when runtime not started. + if (!runtime_->IsStarted()) { + { + ScopedObjectAccess soa(Thread::Current()); + class_loader_ = LoadDex("MyClassNatives"); + } + CompileForTest(class_loader_, direct, method_name, method_sig); + // Start runtime. + Thread::Current()->TransitionFromSuspendedToRunnable(); + android::InitializeNativeLoader(); + bool started = runtime_->Start(); + CHECK(started); + } + // JNI operations after runtime start. 
+ env_ = Thread::Current()->GetJniEnv(); + jklass_ = env_->FindClass("MyClassNatives"); + ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig; + + if (direct) { + jmethod_ = env_->GetStaticMethodID(jklass_, method_name, method_sig); + } else { + jmethod_ = env_->GetMethodID(jklass_, method_name, method_sig); + } + ASSERT_TRUE(jmethod_ != nullptr) << method_name << " " << method_sig; + + if (native_fnptr != nullptr) { + JNINativeMethod methods[] = { { method_name, method_sig, native_fnptr } }; + ASSERT_EQ(JNI_OK, env_->RegisterNatives(jklass_, methods, 1)) + << method_name << " " << method_sig; + } else { + env_->UnregisterNatives(jklass_); + } + + jmethodID constructor = env_->GetMethodID(jklass_, "", "()V"); + jobj_ = env_->NewObject(jklass_, constructor); + ASSERT_TRUE(jobj_ != nullptr) << method_name << " " << method_sig; + } + + public: + // Available as statics so our JNI handlers can access these. + static jclass jklass_; + static jobject jobj_; + static jobject class_loader_; + + protected: + // We have to list the methods here so we can share them between default and generic JNI. 
+ void CompileAndRunNoArgMethodImpl(); + void CompileAndRunIntMethodThroughStubImpl(); + void CompileAndRunStaticIntMethodThroughStubImpl(); + void CompileAndRunIntMethodImpl(); + void CompileAndRunIntIntMethodImpl(); + void CompileAndRunLongLongMethodImpl(); + void CompileAndRunDoubleDoubleMethodImpl(); + void CompileAndRun_fooJJ_synchronizedImpl(); + void CompileAndRunIntObjectObjectMethodImpl(); + void CompileAndRunStaticIntIntMethodImpl(); + void CompileAndRunStaticDoubleDoubleMethodImpl(); + void RunStaticLogDoubleMethodImpl(); + void RunStaticLogFloatMethodImpl(); + void RunStaticReturnTrueImpl(); + void RunStaticReturnFalseImpl(); + void RunGenericStaticReturnIntImpl(); + void RunGenericStaticReturnDoubleImpl(); + void RunGenericStaticReturnLongImpl(); + void CompileAndRunStaticIntObjectObjectMethodImpl(); + void CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl(); + void ExceptionHandlingImpl(); + void NativeStackTraceElementImpl(); + void ReturnGlobalRefImpl(); + void LocalReferenceTableClearingTestImpl(); + void JavaLangSystemArrayCopyImpl(); + void CompareAndSwapIntImpl(); + void GetTextImpl(); + void GetSinkPropertiesNativeImpl(); + void UpcallReturnTypeChecking_InstanceImpl(); + void UpcallReturnTypeChecking_StaticImpl(); + void UpcallArgumentTypeChecking_InstanceImpl(); + void UpcallArgumentTypeChecking_StaticImpl(); + void CompileAndRunFloatFloatMethodImpl(); + void CheckParameterAlignImpl(); + void MaxParamNumberImpl(); + void WithoutImplementationImpl(); + void WithoutImplementationRefReturnImpl(); + void StaticWithoutImplementationImpl(); + void StackArgsIntsFirstImpl(); + void StackArgsFloatsFirstImpl(); + void StackArgsMixedImpl(); + + void NormalNativeImpl(); + void FastNativeImpl(); + void CriticalNativeImpl(); + + JNIEnv* env_; + jmethodID jmethod_; + + private: + bool check_generic_jni_; +}; + +jclass JniCompilerTest::jklass_; +jobject JniCompilerTest::jobj_; +jobject JniCompilerTest::class_loader_; + +// Test the normal compiler and 
normal generic JNI only. +// The following features are unsupported in @FastNative: +// 1) synchronized keyword +# define JNI_TEST_NORMAL_ONLY(TestName) \ + TEST_F(JniCompilerTest, TestName ## NormalCompiler) { \ + ScopedCheckHandleScope top_handle_scope_check; \ + SCOPED_TRACE("Normal JNI with compiler"); \ + gCurrentJni = static_cast(JniKind::kNormal); \ + TestName ## Impl(); \ + } \ + TEST_F(JniCompilerTest, TestName ## NormalGeneric) { \ + ScopedCheckHandleScope top_handle_scope_check; \ + SCOPED_TRACE("Normal JNI with generic"); \ + gCurrentJni = static_cast(JniKind::kNormal); \ + SetCheckGenericJni(true); \ + TestName ## Impl(); \ + } + +// Test (normal, @FastNative) x (compiler, generic). +#define JNI_TEST(TestName) \ + JNI_TEST_NORMAL_ONLY(TestName) \ + TEST_F(JniCompilerTest, TestName ## FastCompiler) { \ + ScopedCheckHandleScope top_handle_scope_check; \ + SCOPED_TRACE("@FastNative JNI with compiler"); \ + gCurrentJni = static_cast(JniKind::kFast); \ + TestName ## Impl(); \ + } \ + \ + TEST_F(JniCompilerTest, TestName ## FastGeneric) { \ + ScopedCheckHandleScope top_handle_scope_check; \ + SCOPED_TRACE("@FastNative JNI with generic"); \ + gCurrentJni = static_cast(JniKind::kFast); \ + SetCheckGenericJni(true); \ + TestName ## Impl(); \ + } + +// Test (@CriticalNative) x (compiler, generic) only. +#define JNI_TEST_CRITICAL_ONLY(TestName) \ + TEST_F(JniCompilerTest, TestName ## CriticalCompiler) { \ + ScopedCheckHandleScope top_handle_scope_check; \ + SCOPED_TRACE("@CriticalNative JNI with compiler"); \ + gCurrentJni = static_cast(JniKind::kCritical); \ + TestName ## Impl(); \ + } \ + TEST_F(JniCompilerTest, TestName ## CriticalGeneric) { \ + ScopedCheckHandleScope top_handle_scope_check; \ + SCOPED_TRACE("@CriticalNative JNI with generic"); \ + gCurrentJni = static_cast(JniKind::kCritical); \ + SetCheckGenericJni(true); \ + TestName ## Impl(); \ + } + +// Test everything: (normal, @FastNative, @CriticalNative) x (compiler, generic). 
+#define JNI_TEST_CRITICAL(TestName) \ + JNI_TEST(TestName) \ + JNI_TEST_CRITICAL_ONLY(TestName) \ + +static void expectValidThreadState() { + // Normal JNI always transitions to "Native". Other JNIs stay in the "Runnable" state. + if (IsCurrentJniNormal()) { + EXPECT_EQ(kNative, Thread::Current()->GetState()); + } else { + EXPECT_EQ(kRunnable, Thread::Current()->GetState()); + } +} + +#define EXPECT_THREAD_STATE_FOR_CURRENT_JNI() expectValidThreadState() + +static void expectValidMutatorLockHeld() { + if (IsCurrentJniNormal()) { + Locks::mutator_lock_->AssertNotHeld(Thread::Current()); + } else { + Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); + } +} + +#define EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI() expectValidMutatorLockHeld() + +static void expectValidJniEnvAndObject(JNIEnv* env, jobject thisObj) { + if (!IsCurrentJniCritical()) { + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + ASSERT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + } else { + LOG(FATAL) << "Objects are not supported for @CriticalNative, why is this being tested?"; + UNREACHABLE(); + } +} + +// Validates the JNIEnv to be the same as the current thread's JNIEnv, and makes sure +// that the object here is an instance of the class we registered the method with. +// +// Hard-fails if this somehow gets invoked for @CriticalNative since objects are unsupported. +#define EXPECT_JNI_ENV_AND_OBJECT_FOR_CURRENT_JNI(env, thisObj) \ + expectValidJniEnvAndObject(env, thisObj) + +static void expectValidJniEnvAndClass(JNIEnv* env, jclass kls) { + if (!IsCurrentJniCritical()) { + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + ASSERT_TRUE(kls != nullptr); + EXPECT_TRUE(env->IsSameObject(static_cast(JniCompilerTest::jklass_), + static_cast(kls))); + } else { + // This is pretty much vacuously true but catch any testing setup mistakes. 
+ EXPECT_EQ(env, kCriticalDummyJniEnv); + EXPECT_EQ(kls, kCriticalDummyJniClass); + } +} + +// Validates the JNIEnv is the same as the current thread's JNIenv, and makes sure +// that the jclass we got in the JNI handler is the same one as the class the method was looked +// up for. +// +// (Checks are skipped for @CriticalNative since the two values are dummy). +#define EXPECT_JNI_ENV_AND_CLASS_FOR_CURRENT_JNI(env, kls) expectValidJniEnvAndClass(env, kls) + +// Temporarily disable the EXPECT_NUM_STACK_REFERENCES check (for a single test). +struct ScopedDisableCheckNumStackReferences { + ScopedDisableCheckNumStackReferences() { + CHECK(sCheckNumStackReferences); // No nested support. + sCheckNumStackReferences = false; + } + + ~ScopedDisableCheckNumStackReferences() { + sCheckNumStackReferences = true; + } + + static bool sCheckNumStackReferences; +}; + +bool ScopedDisableCheckNumStackReferences::sCheckNumStackReferences = true; + +// Check that the handle scope at the start of this block is the same +// as the handle scope at the end of the block. +struct ScopedCheckHandleScope { + ScopedCheckHandleScope() : handle_scope_(Thread::Current()->GetTopHandleScope()) { + } + + ~ScopedCheckHandleScope() { + EXPECT_EQ(handle_scope_, Thread::Current()->GetTopHandleScope()) + << "Top-most handle scope must be the same after all the JNI " + << "invocations have finished (as before they were invoked)."; + } + + BaseHandleScope* const handle_scope_; +}; + +// Number of references allocated in JNI ShadowFrames on the given thread. +static size_t NumJniShadowFrameReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { + return self->GetManagedStack()->NumJniShadowFrameReferences(); +} + +// Number of references in handle scope on the given thread. 
+static size_t NumHandleReferences(Thread* self) { + size_t count = 0; + for (BaseHandleScope* cur = self->GetTopHandleScope(); cur != nullptr; cur = cur->GetLink()) { + count += cur->NumberOfReferences(); + } + return count; +} + +// Number of references allocated in handle scopes & JNI shadow frames on this thread. +static size_t NumStackReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { + return NumHandleReferences(self) + NumJniShadowFrameReferences(self); +} + +static void expectNumStackReferences(size_t val1, size_t val2) { + // In rare cases when JNI functions call themselves recursively, + // disable this test because it will have a false negative. + if (!IsCurrentJniCritical() && ScopedDisableCheckNumStackReferences::sCheckNumStackReferences) { + /* @CriticalNative doesn't build a HandleScope, so this test is meaningless then. */ + ScopedObjectAccess soa(Thread::Current()); + + size_t actual_num = NumStackReferences(Thread::Current()); + // XX: Not too sure what's going on. + // Sometimes null references get placed and sometimes they don't? + EXPECT_TRUE(val1 == actual_num || val2 == actual_num) + << "expected either " << val1 << " or " << val2 + << " number of stack references, but got: " << actual_num; + } +} + +#define EXPECT_NUM_STACK_REFERENCES(val1, val2) expectNumStackReferences(val1, val2) + +template +struct make_jni_test_decorator; + +// Decorator for "static" JNI callbacks. +template +struct make_jni_test_decorator { + static R apply(JNIEnv* env, jclass kls, Args ... args) { + EXPECT_THREAD_STATE_FOR_CURRENT_JNI(); + EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI(); + EXPECT_JNI_ENV_AND_CLASS_FOR_CURRENT_JNI(env, kls); + // All incoming parameters + the jclass get put into the transition's StackHandleScope. + EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(kls, args...), + (count_refs_helper::value)); + + return fn(env, kls, args...); + } +}; + +// Decorator for instance JNI callbacks. 
+template +struct make_jni_test_decorator { + static R apply(JNIEnv* env, jobject thisObj, Args ... args) { + EXPECT_THREAD_STATE_FOR_CURRENT_JNI(); + EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI(); + EXPECT_JNI_ENV_AND_OBJECT_FOR_CURRENT_JNI(env, thisObj); + // All incoming parameters + the implicit 'this' get put into the transition's StackHandleScope. + EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(thisObj, args...), + (count_refs_helper::value)); + + return fn(env, thisObj, args...); + } +}; + +// Decorate the regular JNI callee with the extra gtest checks. +// This way we can have common test logic for everything generic like checking if a lock is held, +// checking handle scope state, etc. +#define MAKE_JNI_TEST_DECORATOR(fn) make_jni_test_decorator::apply + +// Convert function f(JNIEnv*,jclass,a,b,c,d...) into f2(a,b,c,d...) +// -- This way we don't have to write out each implementation twice for @CriticalNative. +#define JNI_CRITICAL_WRAPPER(func) jni_remove_extra_parameters::apply +// Get a function pointer whose calling convention either matches a regular native +// or a critical native depending on which kind of jni is currently under test. +// -- This also has the benefit of genering a compile time error if the 'func' doesn't properly +// have JNIEnv and jclass parameters first. +#define CURRENT_JNI_WRAPPER(func) \ + (IsCurrentJniCritical() \ + ? reinterpret_cast(&JNI_CRITICAL_WRAPPER(MAKE_JNI_TEST_DECORATOR(func))) \ + : reinterpret_cast(&MAKE_JNI_TEST_DECORATOR(func))) + +// Do the opposite of the above. Do *not* wrap the function, instead just cast it to a void*. +// Only for "TEST_JNI_NORMAL_ONLY" configs, and it inserts a test assert to ensure this is the case. +#define NORMAL_JNI_ONLY_NOWRAP(func) \ + ({ ASSERT_TRUE(IsCurrentJniNormal()); reinterpret_cast(&(func)); }) +// Same as above, but with nullptr. When we want to test the stub functionality. 
+#define NORMAL_OR_FAST_JNI_ONLY_NULLPTR \ + ({ ASSERT_TRUE(IsCurrentJniNormal() || IsCurrentJniFast()); nullptr; }) + + +int gJava_MyClassNatives_foo_calls[kJniKindCount] = {}; +void Java_MyClassNatives_foo(JNIEnv*, jobject) { + gJava_MyClassNatives_foo_calls[gCurrentJni]++; +} + +void JniCompilerTest::CompileAndRunNoArgMethodImpl() { + SetUpForTest(false, "foo", "()V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_foo)); + + EXPECT_EQ(0, gJava_MyClassNatives_foo_calls[gCurrentJni]); + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); + EXPECT_EQ(1, gJava_MyClassNatives_foo_calls[gCurrentJni]); + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); + EXPECT_EQ(2, gJava_MyClassNatives_foo_calls[gCurrentJni]); + + gJava_MyClassNatives_foo_calls[gCurrentJni] = 0; +} + +JNI_TEST(CompileAndRunNoArgMethod) + +void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() { + SetUpForTest(false, "bar", "(I)I", NORMAL_OR_FAST_JNI_ONLY_NULLPTR); + // calling through stub will link with &Java_MyClassNatives_bar{,_1Fast} + + std::string reason; + ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> + LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) + << reason; + + jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); + EXPECT_EQ(25, result); +} + +// Note: @CriticalNative is only for static methods. 
+JNI_TEST(CompileAndRunIntMethodThroughStub) + +void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() { + SetUpForTest(true, "sbar", "(I)I", nullptr); + // calling through stub will link with &Java_MyClassNatives_sbar{,_1Fast,_1Critical} + + std::string reason; + ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> + LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) + << reason; + + jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); + EXPECT_EQ(43, result); +} + +JNI_TEST_CRITICAL(CompileAndRunStaticIntMethodThroughStub) + +int gJava_MyClassNatives_fooI_calls[kJniKindCount] = {}; +jint Java_MyClassNatives_fooI(JNIEnv*, jobject, jint x) { + gJava_MyClassNatives_fooI_calls[gCurrentJni]++; + return x; +} + +void JniCompilerTest::CompileAndRunIntMethodImpl() { + SetUpForTest(false, "fooI", "(I)I", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooI)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooI_calls[gCurrentJni]); + jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 42); + EXPECT_EQ(42, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooI_calls[gCurrentJni]); + result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFED00D); + EXPECT_EQ(static_cast(0xCAFED00D), result); + EXPECT_EQ(2, gJava_MyClassNatives_fooI_calls[gCurrentJni]); + + gJava_MyClassNatives_fooI_calls[gCurrentJni] = 0; +} + +JNI_TEST(CompileAndRunIntMethod) + +int gJava_MyClassNatives_fooII_calls[kJniKindCount] = {}; +jint Java_MyClassNatives_fooII(JNIEnv*, jobject, jint x, jint y) { + gJava_MyClassNatives_fooII_calls[gCurrentJni]++; + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunIntIntMethodImpl() { + SetUpForTest(false, "fooII", "(II)I", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooII)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooII_calls[gCurrentJni]); + jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 99, 10); + EXPECT_EQ(99 - 10, result); + EXPECT_EQ(1, 
gJava_MyClassNatives_fooII_calls[gCurrentJni]); + result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFEBABE, + 0xCAFED00D); + EXPECT_EQ(static_cast(0xCAFEBABE - 0xCAFED00D), result); + EXPECT_EQ(2, gJava_MyClassNatives_fooII_calls[gCurrentJni]); + + gJava_MyClassNatives_fooII_calls[gCurrentJni] = 0; +} + +JNI_TEST(CompileAndRunIntIntMethod) + +int gJava_MyClassNatives_fooJJ_calls[kJniKindCount] = {}; +jlong Java_MyClassNatives_fooJJ(JNIEnv*, jobject, jlong x, jlong y) { + gJava_MyClassNatives_fooJJ_calls[gCurrentJni]++; + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunLongLongMethodImpl() { + SetUpForTest(false, "fooJJ", "(JJ)J", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooJJ)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_calls[gCurrentJni]); + jlong a = INT64_C(0x1234567890ABCDEF); + jlong b = INT64_C(0xFEDCBA0987654321); + jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_EQ(a - b, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_calls[gCurrentJni]); + result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, b, a); + EXPECT_EQ(b - a, result); + EXPECT_EQ(2, gJava_MyClassNatives_fooJJ_calls[gCurrentJni]); + + gJava_MyClassNatives_fooJJ_calls[gCurrentJni] = 0; +} + +JNI_TEST(CompileAndRunLongLongMethod) + +int gJava_MyClassNatives_fooDD_calls[kJniKindCount] = {}; +jdouble Java_MyClassNatives_fooDD(JNIEnv*, jobject, jdouble x, jdouble y) { + gJava_MyClassNatives_fooDD_calls[gCurrentJni]++; + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunDoubleDoubleMethodImpl() { + SetUpForTest(false, "fooDD", "(DD)D", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooDD)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooDD_calls[gCurrentJni]); + jdouble result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, + 99.0, 10.0); + EXPECT_DOUBLE_EQ(99.0 - 10.0, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooDD_calls[gCurrentJni]); + jdouble a = 
3.14159265358979323846; + jdouble b = 0.69314718055994530942; + result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_DOUBLE_EQ(a - b, result); + EXPECT_EQ(2, gJava_MyClassNatives_fooDD_calls[gCurrentJni]); + + gJava_MyClassNatives_fooDD_calls[gCurrentJni] = 0; +} + +int gJava_MyClassNatives_fooJJ_synchronized_calls[kJniKindCount] = {}; +jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv*, jobject, jlong x, jlong y) { + gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]++; + return x | y; +} + +void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() { + SetUpForTest(false, "fooJJ_synchronized", "(JJ)J", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooJJ_synchronized)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]); + jlong a = 0x1000000020000000ULL; + jlong b = 0x00ff000000aa0000ULL; + jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_EQ(a | b, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]); + + gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni] = 0; +} + +JNI_TEST_NORMAL_ONLY(CompileAndRun_fooJJ_synchronized) + +int gJava_MyClassNatives_fooIOO_calls[kJniKindCount] = {}; +jobject Java_MyClassNatives_fooIOO(JNIEnv*, jobject thisObj, jint x, jobject y, + jobject z) { + gJava_MyClassNatives_fooIOO_calls[gCurrentJni]++; + switch (x) { + case 1: + return y; + case 2: + return z; + default: + return thisObj; + } +} + +void JniCompilerTest::CompileAndRunIntObjectObjectMethodImpl() { + SetUpForTest(false, "fooIOO", + "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooIOO)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(1, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + + result = 
env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, jklass_); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(2, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, nullptr, jklass_); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(3, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, nullptr, jklass_); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(4, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(5, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(6, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, nullptr); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); + + gJava_MyClassNatives_fooIOO_calls[gCurrentJni] = 0; +} + +JNI_TEST(CompileAndRunIntObjectObjectMethod) + +int gJava_MyClassNatives_fooSII_calls[kJniKindCount] = {}; +jint Java_MyClassNatives_fooSII(JNIEnv* env ATTRIBUTE_UNUSED, + jclass klass ATTRIBUTE_UNUSED, + jint x, + jint y) { + gJava_MyClassNatives_fooSII_calls[gCurrentJni]++; + return x + y; +} + +void JniCompilerTest::CompileAndRunStaticIntIntMethodImpl() { + SetUpForTest(true, "fooSII", "(II)I", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSII)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSII_calls[gCurrentJni]); + jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 20, 30); + EXPECT_EQ(50, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooSII_calls[gCurrentJni]); + + 
gJava_MyClassNatives_fooSII_calls[gCurrentJni] = 0; +} + +JNI_TEST_CRITICAL(CompileAndRunStaticIntIntMethod) + +int gJava_MyClassNatives_fooSDD_calls[kJniKindCount] = {}; +jdouble Java_MyClassNatives_fooSDD(JNIEnv* env ATTRIBUTE_UNUSED, + jclass klass ATTRIBUTE_UNUSED, + jdouble x, + jdouble y) { + gJava_MyClassNatives_fooSDD_calls[gCurrentJni]++; + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunStaticDoubleDoubleMethodImpl() { + SetUpForTest(true, "fooSDD", "(DD)D", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSDD)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSDD_calls[gCurrentJni]); + jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 99.0, 10.0); + EXPECT_DOUBLE_EQ(99.0 - 10.0, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooSDD_calls[gCurrentJni]); + jdouble a = 3.14159265358979323846; + jdouble b = 0.69314718055994530942; + result = env_->CallStaticDoubleMethod(jklass_, jmethod_, a, b); + EXPECT_DOUBLE_EQ(a - b, result); + EXPECT_DOUBLE_EQ(2, gJava_MyClassNatives_fooSDD_calls[gCurrentJni]); + + gJava_MyClassNatives_fooSDD_calls[gCurrentJni] = 0; +} + +JNI_TEST_CRITICAL(CompileAndRunStaticDoubleDoubleMethod) + +// The x86 generic JNI code had a bug where it assumed a floating +// point return value would be in xmm0. We use log, to somehow ensure +// the compiler will use the floating point stack. + +jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) { + return log(x); +} + +jdouble Java_MyClassNatives_logD_notNormal(JNIEnv*, jclass, jdouble x) { + EXPECT_DOUBLE_EQ(2.0, x); + return log(x); +} + +void JniCompilerTest::RunStaticLogDoubleMethodImpl() { + void* jni_handler; + if (IsCurrentJniNormal()) { + // This test seems a bit special, don't use a JNI wrapper here. 
+ jni_handler = NORMAL_JNI_ONLY_NOWRAP(Java_MyClassNatives_logD); + } else { + jni_handler = CURRENT_JNI_WRAPPER(Java_MyClassNatives_logD_notNormal); + } + SetUpForTest(true, "logD", "(D)D", jni_handler); + + jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 2.0); + EXPECT_DOUBLE_EQ(log(2.0), result); +} + +JNI_TEST_CRITICAL(RunStaticLogDoubleMethod) + +jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) { + return logf(x); +} + +void JniCompilerTest::RunStaticLogFloatMethodImpl() { + void* jni_handler; + if (IsCurrentJniNormal()) { + // This test seems a bit special, don't use a JNI wrapper here. + jni_handler = NORMAL_JNI_ONLY_NOWRAP(Java_MyClassNatives_logF); + } else { + jni_handler = CURRENT_JNI_WRAPPER(Java_MyClassNatives_logF); + } + + SetUpForTest(true, "logF", "(F)F", jni_handler); + + jfloat result = env_->CallStaticFloatMethod(jklass_, jmethod_, 2.0); + EXPECT_FLOAT_EQ(logf(2.0), result); +} + +JNI_TEST_CRITICAL(RunStaticLogFloatMethod) + +jboolean Java_MyClassNatives_returnTrue(JNIEnv*, jclass) { + return JNI_TRUE; +} + +jboolean Java_MyClassNatives_returnFalse(JNIEnv*, jclass) { + return JNI_FALSE; +} + +jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) { + return 42; +} + +void JniCompilerTest::RunStaticReturnTrueImpl() { + SetUpForTest(true, "returnTrue", "()Z", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnTrue)); + + jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_); + EXPECT_TRUE(result); +} + +JNI_TEST_CRITICAL(RunStaticReturnTrue) + +void JniCompilerTest::RunStaticReturnFalseImpl() { + SetUpForTest(true, "returnFalse", "()Z", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnFalse)); + + jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_); + EXPECT_FALSE(result); +} + +JNI_TEST_CRITICAL(RunStaticReturnFalse) + +void JniCompilerTest::RunGenericStaticReturnIntImpl() { + SetUpForTest(true, "returnInt", "()I", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnInt)); + + jint result = 
env_->CallStaticIntMethod(jklass_, jmethod_); + EXPECT_EQ(42, result); +} + +JNI_TEST_CRITICAL(RunGenericStaticReturnInt) + +int gJava_MyClassNatives_returnDouble_calls[kJniKindCount] = {}; +jdouble Java_MyClassNatives_returnDouble(JNIEnv*, jclass) { + gJava_MyClassNatives_returnDouble_calls[gCurrentJni]++; + return 4.0; +} + +void JniCompilerTest::RunGenericStaticReturnDoubleImpl() { + SetUpForTest(true, "returnDouble", "()D", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnDouble)); + + jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_); + EXPECT_DOUBLE_EQ(4.0, result); + EXPECT_EQ(1, gJava_MyClassNatives_returnDouble_calls[gCurrentJni]); + + gJava_MyClassNatives_returnDouble_calls[gCurrentJni] = 0; +} + +JNI_TEST_CRITICAL(RunGenericStaticReturnDouble) + +jlong Java_MyClassNatives_returnLong(JNIEnv*, jclass) { + return 0xFEEDDEADFEEDL; +} + +void JniCompilerTest::RunGenericStaticReturnLongImpl() { + SetUpForTest(true, "returnLong", "()J", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnLong)); + + jlong result = env_->CallStaticLongMethod(jklass_, jmethod_); + EXPECT_EQ(0xFEEDDEADFEEDL, result); +} + +JNI_TEST_CRITICAL(RunGenericStaticReturnLong) + +int gJava_MyClassNatives_fooSIOO_calls[kJniKindCount] = {}; +jobject Java_MyClassNatives_fooSIOO(JNIEnv*, jclass klass, jint x, jobject y, jobject z) { + gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]++; + switch (x) { + case 1: + return y; + case 2: + return z; + default: + return klass; + } +} + +void JniCompilerTest::CompileAndRunStaticIntObjectObjectMethodImpl() { + SetUpForTest(true, "fooSIOO", + "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSIOO)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(1, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + + result = 
env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(2, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(3, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(4, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(5, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(6, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); + + gJava_MyClassNatives_fooSIOO_calls[gCurrentJni] = 0; +} + +JNI_TEST(CompileAndRunStaticIntObjectObjectMethod) + +int gJava_MyClassNatives_fooSSIOO_calls[kJniKindCount] = {}; +jobject Java_MyClassNatives_fooSSIOO(JNIEnv*, jclass klass, jint x, jobject y, jobject z) { + gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]++; + switch (x) { + case 1: + return y; + case 2: + return z; + default: + return klass; + } +} + +void JniCompilerTest::CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl() { + SetUpForTest(true, "fooSSIOO", + "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSSIOO)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr); + 
EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(1, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(2, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(3, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(4, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(5, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(6, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); + + gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni] = 0; +} + +// TODO: Maybe. @FastNative support for returning Objects? 
+JNI_TEST_NORMAL_ONLY(CompileAndRunStaticSynchronizedIntObjectObjectMethod) + +void Java_MyClassNatives_throwException(JNIEnv* env, jobject) { + jclass c = env->FindClass("java/lang/RuntimeException"); + env->ThrowNew(c, "hello"); +} + +void JniCompilerTest::ExceptionHandlingImpl() { + { + ASSERT_FALSE(runtime_->IsStarted()); + ScopedObjectAccess soa(Thread::Current()); + class_loader_ = LoadDex("MyClassNatives"); + + // all compilation needs to happen before Runtime::Start + CompileForTestWithCurrentJni(class_loader_, false, "foo", "()V"); + CompileForTestWithCurrentJni(class_loader_, false, "throwException", "()V"); + CompileForTestWithCurrentJni(class_loader_, false, "foo", "()V"); + } + // Start runtime to avoid re-initialization in SetupForTest. + Thread::Current()->TransitionFromSuspendedToRunnable(); + bool started = runtime_->Start(); + CHECK(started); + + gJava_MyClassNatives_foo_calls[gCurrentJni] = 0; + + // Check a single call of a JNI method is ok + SetUpForTest(false, "foo", "()V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_foo)); + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); + EXPECT_EQ(1, gJava_MyClassNatives_foo_calls[gCurrentJni]); + EXPECT_FALSE(Thread::Current()->IsExceptionPending()); + + // Get class for exception we expect to be thrown + ScopedLocalRef jlre(env_, env_->FindClass("java/lang/RuntimeException")); + SetUpForTest(false, "throwException", "()V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_throwException)); + // Call Java_MyClassNatives_throwException (JNI method that throws exception) + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); + EXPECT_EQ(1, gJava_MyClassNatives_foo_calls[gCurrentJni]); + EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); + ScopedLocalRef exception(env_, env_->ExceptionOccurred()); + env_->ExceptionClear(); + EXPECT_TRUE(env_->IsInstanceOf(exception.get(), jlre.get())); + + // Check a single call of a JNI method is ok + SetUpForTest(false, "foo", "()V", 
reinterpret_cast(&Java_MyClassNatives_foo)); + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); + EXPECT_EQ(2, gJava_MyClassNatives_foo_calls[gCurrentJni]); + + gJava_MyClassNatives_foo_calls[gCurrentJni] = 0; +} + +JNI_TEST(ExceptionHandling) + +jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) { + if (i <= 0) { + // We want to check raw Object* / Array* below + ScopedObjectAccess soa(env); + + // Build stack trace + jobject internal = Thread::Current()->CreateInternalStackTrace(soa); + jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal); + ObjPtr> trace_array = + soa.Decode>(ste_array); + EXPECT_TRUE(trace_array != nullptr); + EXPECT_EQ(11, trace_array->GetLength()); + + // Check stack trace entries have expected values + for (int32_t j = 0; j < trace_array->GetLength(); ++j) { + EXPECT_EQ(-2, trace_array->Get(j)->GetLineNumber()); + ObjPtr ste = trace_array->Get(j); + EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str()); + EXPECT_EQ(("fooI" + CurrentJniStringSuffix()), ste->GetMethodName()->ToModifiedUtf8()); + } + + // end recursion + return 0; + } else { + jclass jklass = env->FindClass("MyClassNatives"); + EXPECT_TRUE(jklass != nullptr); + jmethodID jmethod = env->GetMethodID(jklass, + ("fooI" + CurrentJniStringSuffix()).c_str(), + "(I)I"); + EXPECT_TRUE(jmethod != nullptr); + + // Recurse with i - 1 + jint result = env->CallNonvirtualIntMethod(thisObj, jklass, jmethod, i - 1); + + // Return sum of all depths + return i + result; + } +} + +void JniCompilerTest::NativeStackTraceElementImpl() { + SetUpForTest(false, "fooI", "(I)I", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_nativeUpCall)); + + // Usual # local references on stack check fails because nativeUpCall calls itself recursively, + // each time the # of local references will therefore go up. 
+ ScopedDisableCheckNumStackReferences disable_num_stack_check; + jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 10); + + EXPECT_EQ(10+9+8+7+6+5+4+3+2+1, result); +} + +JNI_TEST(NativeStackTraceElement) + +jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) { + return env->NewGlobalRef(x); +} + +void JniCompilerTest::ReturnGlobalRefImpl() { + SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooO)); + jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, jobj_); + EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(result)); + EXPECT_TRUE(env_->IsSameObject(result, jobj_)); +} + +JNI_TEST(ReturnGlobalRef) + +jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) { + // Add 10 local references + ScopedObjectAccess soa(env); + for (int i = 0; i < 10; i++) { + soa.AddLocalReference(soa.Decode(thisObj)); + } + return x+1; +} + +void JniCompilerTest::LocalReferenceTableClearingTestImpl() { + SetUpForTest(false, "fooI", "(I)I", CURRENT_JNI_WRAPPER(local_ref_test)); + // 1000 invocations of a method that adds 10 local references + for (int i = 0; i < 1000; i++) { + jint result = env_->CallIntMethod(jobj_, jmethod_, i); + EXPECT_TRUE(result == i + 1); + } +} + +JNI_TEST(LocalReferenceTableClearingTest) + +void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject dst, jint dst_pos, jint length) { + EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, klass)); + EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, dst)); + EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, src)); + EXPECT_EQ(1234, src_pos); + EXPECT_EQ(5678, dst_pos); + EXPECT_EQ(9876, length); +} + +void JniCompilerTest::JavaLangSystemArrayCopyImpl() { + SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V", + CURRENT_JNI_WRAPPER(my_arraycopy)); + env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876); +} + 
+JNI_TEST(JavaLangSystemArrayCopy) + +jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint expected, jint newval) { + EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, unsafe)); + EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj)); + EXPECT_EQ(INT64_C(0x12345678ABCDEF88), offset); + EXPECT_EQ(static_cast(0xCAFEF00D), expected); + EXPECT_EQ(static_cast(0xEBADF00D), newval); + return JNI_TRUE; +} + +void JniCompilerTest::CompareAndSwapIntImpl() { + SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z", + CURRENT_JNI_WRAPPER(my_casi)); + jboolean result = env_->CallBooleanMethod(jobj_, jmethod_, jobj_, INT64_C(0x12345678ABCDEF88), + 0xCAFEF00D, 0xEBADF00D); + EXPECT_EQ(result, JNI_TRUE); +} + +JNI_TEST(CompareAndSwapInt) + +jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2, jobject obj2) { + EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); + EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1)); + EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2)); + EXPECT_EQ(0x12345678ABCDEF88LL, val1); + EXPECT_EQ(0x7FEDCBA987654321LL, val2); + return 42; +} + +void JniCompilerTest::GetTextImpl() { + SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I", + CURRENT_JNI_WRAPPER(my_gettext)); + jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88LL, jobj_, + INT64_C(0x7FEDCBA987654321), jobj_); + EXPECT_EQ(result, 42); +} + +JNI_TEST(GetText) + +int gJava_MyClassNatives_GetSinkProperties_calls[kJniKindCount] = {}; +jarray Java_MyClassNatives_GetSinkProperties(JNIEnv*, jobject thisObj, jstring s) { + EXPECT_EQ(s, nullptr); + gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]++; + + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + EXPECT_TRUE(self->HoldsLock(soa.Decode(thisObj))); + return nullptr; +} + +void JniCompilerTest::GetSinkPropertiesNativeImpl() { + SetUpForTest(false, "getSinkPropertiesNative", 
"(Ljava/lang/String;)[Ljava/lang/Object;", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_GetSinkProperties)); + + EXPECT_EQ(0, gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]); + jarray result = down_cast( + env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, nullptr)); + EXPECT_EQ(nullptr, result); + EXPECT_EQ(1, gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]); + + gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni] = 0; +} + +// @FastNative doesn't support 'synchronized' keyword and +// never will -- locking functions aren't fast. +JNI_TEST_NORMAL_ONLY(GetSinkPropertiesNative) + +// This should return jclass, but we're imitating a bug pattern. +jobject Java_MyClassNatives_instanceMethodThatShouldReturnClass(JNIEnv* env, jobject) { + return env->NewStringUTF("not a class!"); +} + +// This should return jclass, but we're imitating a bug pattern. +jobject Java_MyClassNatives_staticMethodThatShouldReturnClass(JNIEnv* env, jclass) { + return env->NewStringUTF("not a class!"); +} + +void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() { + SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_instanceMethodThatShouldReturnClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // This native method is bad, and tries to return a jstring as a jclass. + env_->CallObjectMethod(jobj_, jmethod_); + check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " + + "of java.lang.String from java.lang.Class " + + "MyClassNatives.instanceMethodThatShouldReturnClass" + + CurrentJniStringSuffix() + "()"); + + // Here, we just call the method incorrectly; we should catch that too. 
+ env_->CallObjectMethod(jobj_, jmethod_); + check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " + + "of java.lang.String from java.lang.Class " + + "MyClassNatives.instanceMethodThatShouldReturnClass" + + CurrentJniStringSuffix() + "()"); + env_->CallStaticObjectMethod(jklass_, jmethod_); + check_jni_abort_catcher.Check(std::string() + "calling non-static method " + + "java.lang.Class " + + "MyClassNatives.instanceMethodThatShouldReturnClass" + + CurrentJniStringSuffix() + "() with CallStaticObjectMethodV"); +} + +JNI_TEST(UpcallReturnTypeChecking_Instance) + +void JniCompilerTest::UpcallReturnTypeChecking_StaticImpl() { + SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_staticMethodThatShouldReturnClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // This native method is bad, and tries to return a jstring as a jclass. + env_->CallStaticObjectMethod(jklass_, jmethod_); + check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " + + "of java.lang.String from java.lang.Class " + + "MyClassNatives.staticMethodThatShouldReturnClass" + + CurrentJniStringSuffix() + "()"); + + // Here, we just call the method incorrectly; we should catch that too. + env_->CallStaticObjectMethod(jklass_, jmethod_); + check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " + + "of java.lang.String from java.lang.Class " + + "MyClassNatives.staticMethodThatShouldReturnClass" + + CurrentJniStringSuffix() + "()"); + env_->CallObjectMethod(jobj_, jmethod_); + check_jni_abort_catcher.Check(std::string() + "calling static method " + + "java.lang.Class " + + "MyClassNatives.staticMethodThatShouldReturnClass" + + CurrentJniStringSuffix() + "() with CallObjectMethodV"); +} + +JNI_TEST(UpcallReturnTypeChecking_Static) + +// This should take jclass, but we're imitating a bug pattern. 
+void Java_MyClassNatives_instanceMethodThatShouldTakeClass(JNIEnv*, jobject, jclass) { +} + +// This should take jclass, but we're imitating a bug pattern. +void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass) { +} + +void JniCompilerTest::UpcallArgumentTypeChecking_InstanceImpl() { + // This will lead to error messages in the log. + ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_instanceMethodThatShouldTakeClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // We deliberately pass a bad second argument here. + env_->CallVoidMethod(jobj_, jmethod_, 123, env_->NewStringUTF("not a class!")); + check_jni_abort_catcher.Check(std::string() + "bad arguments passed to void " + + "MyClassNatives.instanceMethodThatShouldTakeClass" + + CurrentJniStringSuffix() + "(int, java.lang.Class)"); +} + +JNI_TEST(UpcallArgumentTypeChecking_Instance) + +void JniCompilerTest::UpcallArgumentTypeChecking_StaticImpl() { + // This will lead to error messages in the log. + ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_staticMethodThatShouldTakeClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // We deliberately pass a bad second argument here. 
+ env_->CallStaticVoidMethod(jklass_, jmethod_, 123, env_->NewStringUTF("not a class!")); + check_jni_abort_catcher.Check(std::string() + "bad arguments passed to void " + + "MyClassNatives.staticMethodThatShouldTakeClass" + + CurrentJniStringSuffix() + "(int, java.lang.Class)"); +} + +JNI_TEST(UpcallArgumentTypeChecking_Static) + +jfloat Java_MyClassNatives_checkFloats(JNIEnv*, jobject, jfloat f1, jfloat f2) { + return f1 - f2; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() { + SetUpForTest(false, "checkFloats", "(FF)F", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_checkFloats)); + + jfloat result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_, + 99.0F, 10.0F); + EXPECT_FLOAT_EQ(99.0F - 10.0F, result); + jfloat a = 3.14159F; + jfloat b = 0.69314F; + result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_FLOAT_EQ(a - b, result); +} + +JNI_TEST(CompileAndRunFloatFloatMethod) + +void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED, + jobject thisObj ATTRIBUTE_UNUSED, + jint i1, + jlong l1) { + EXPECT_EQ(i1, 1234); + EXPECT_EQ(l1, INT64_C(0x12345678ABCDEF0)); +} + +void JniCompilerTest::CheckParameterAlignImpl() { + SetUpForTest(false, "checkParameterAlign", "(IJ)V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_checkParameterAlign)); + + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_, 1234, INT64_C(0x12345678ABCDEF0)); +} + +JNI_TEST(CheckParameterAlign) + +void Java_MyClassNatives_maxParamNumber(JNIEnv* env, jobject, + jobject o0, jobject o1, jobject o2, jobject o3, jobject o4, jobject o5, jobject o6, jobject o7, + jobject o8, jobject o9, jobject o10, jobject o11, jobject o12, jobject o13, jobject o14, jobject o15, + jobject o16, jobject o17, jobject o18, jobject o19, jobject o20, jobject o21, jobject o22, jobject o23, + jobject o24, jobject o25, jobject o26, jobject o27, jobject o28, jobject o29, jobject o30, jobject o31, + jobject o32, jobject o33, jobject 
o34, jobject o35, jobject o36, jobject o37, jobject o38, jobject o39, + jobject o40, jobject o41, jobject o42, jobject o43, jobject o44, jobject o45, jobject o46, jobject o47, + jobject o48, jobject o49, jobject o50, jobject o51, jobject o52, jobject o53, jobject o54, jobject o55, + jobject o56, jobject o57, jobject o58, jobject o59, jobject o60, jobject o61, jobject o62, jobject o63, + jobject o64, jobject o65, jobject o66, jobject o67, jobject o68, jobject o69, jobject o70, jobject o71, + jobject o72, jobject o73, jobject o74, jobject o75, jobject o76, jobject o77, jobject o78, jobject o79, + jobject o80, jobject o81, jobject o82, jobject o83, jobject o84, jobject o85, jobject o86, jobject o87, + jobject o88, jobject o89, jobject o90, jobject o91, jobject o92, jobject o93, jobject o94, jobject o95, + jobject o96, jobject o97, jobject o98, jobject o99, jobject o100, jobject o101, jobject o102, jobject o103, + jobject o104, jobject o105, jobject o106, jobject o107, jobject o108, jobject o109, jobject o110, jobject o111, + jobject o112, jobject o113, jobject o114, jobject o115, jobject o116, jobject o117, jobject o118, jobject o119, + jobject o120, jobject o121, jobject o122, jobject o123, jobject o124, jobject o125, jobject o126, jobject o127, + jobject o128, jobject o129, jobject o130, jobject o131, jobject o132, jobject o133, jobject o134, jobject o135, + jobject o136, jobject o137, jobject o138, jobject o139, jobject o140, jobject o141, jobject o142, jobject o143, + jobject o144, jobject o145, jobject o146, jobject o147, jobject o148, jobject o149, jobject o150, jobject o151, + jobject o152, jobject o153, jobject o154, jobject o155, jobject o156, jobject o157, jobject o158, jobject o159, + jobject o160, jobject o161, jobject o162, jobject o163, jobject o164, jobject o165, jobject o166, jobject o167, + jobject o168, jobject o169, jobject o170, jobject o171, jobject o172, jobject o173, jobject o174, jobject o175, + jobject o176, jobject o177, jobject o178, jobject 
o179, jobject o180, jobject o181, jobject o182, jobject o183, + jobject o184, jobject o185, jobject o186, jobject o187, jobject o188, jobject o189, jobject o190, jobject o191, + jobject o192, jobject o193, jobject o194, jobject o195, jobject o196, jobject o197, jobject o198, jobject o199, + jobject o200, jobject o201, jobject o202, jobject o203, jobject o204, jobject o205, jobject o206, jobject o207, + jobject o208, jobject o209, jobject o210, jobject o211, jobject o212, jobject o213, jobject o214, jobject o215, + jobject o216, jobject o217, jobject o218, jobject o219, jobject o220, jobject o221, jobject o222, jobject o223, + jobject o224, jobject o225, jobject o226, jobject o227, jobject o228, jobject o229, jobject o230, jobject o231, + jobject o232, jobject o233, jobject o234, jobject o235, jobject o236, jobject o237, jobject o238, jobject o239, + jobject o240, jobject o241, jobject o242, jobject o243, jobject o244, jobject o245, jobject o246, jobject o247, + jobject o248, jobject o249, jobject o250, jobject o251, jobject o252, jobject o253) { + // two tests possible + if (o0 == nullptr) { + // 1) everything is null + EXPECT_TRUE(o0 == nullptr && o1 == nullptr && o2 == nullptr && o3 == nullptr && o4 == nullptr + && o5 == nullptr && o6 == nullptr && o7 == nullptr && o8 == nullptr && o9 == nullptr + && o10 == nullptr && o11 == nullptr && o12 == nullptr && o13 == nullptr && o14 == nullptr + && o15 == nullptr && o16 == nullptr && o17 == nullptr && o18 == nullptr && o19 == nullptr + && o20 == nullptr && o21 == nullptr && o22 == nullptr && o23 == nullptr && o24 == nullptr + && o25 == nullptr && o26 == nullptr && o27 == nullptr && o28 == nullptr && o29 == nullptr + && o30 == nullptr && o31 == nullptr && o32 == nullptr && o33 == nullptr && o34 == nullptr + && o35 == nullptr && o36 == nullptr && o37 == nullptr && o38 == nullptr && o39 == nullptr + && o40 == nullptr && o41 == nullptr && o42 == nullptr && o43 == nullptr && o44 == nullptr + && o45 == nullptr && o46 == 
nullptr && o47 == nullptr && o48 == nullptr && o49 == nullptr + && o50 == nullptr && o51 == nullptr && o52 == nullptr && o53 == nullptr && o54 == nullptr + && o55 == nullptr && o56 == nullptr && o57 == nullptr && o58 == nullptr && o59 == nullptr + && o60 == nullptr && o61 == nullptr && o62 == nullptr && o63 == nullptr && o64 == nullptr + && o65 == nullptr && o66 == nullptr && o67 == nullptr && o68 == nullptr && o69 == nullptr + && o70 == nullptr && o71 == nullptr && o72 == nullptr && o73 == nullptr && o74 == nullptr + && o75 == nullptr && o76 == nullptr && o77 == nullptr && o78 == nullptr && o79 == nullptr + && o80 == nullptr && o81 == nullptr && o82 == nullptr && o83 == nullptr && o84 == nullptr + && o85 == nullptr && o86 == nullptr && o87 == nullptr && o88 == nullptr && o89 == nullptr + && o90 == nullptr && o91 == nullptr && o92 == nullptr && o93 == nullptr && o94 == nullptr + && o95 == nullptr && o96 == nullptr && o97 == nullptr && o98 == nullptr && o99 == nullptr + && o100 == nullptr && o101 == nullptr && o102 == nullptr && o103 == nullptr && o104 == nullptr + && o105 == nullptr && o106 == nullptr && o107 == nullptr && o108 == nullptr && o109 == nullptr + && o110 == nullptr && o111 == nullptr && o112 == nullptr && o113 == nullptr && o114 == nullptr + && o115 == nullptr && o116 == nullptr && o117 == nullptr && o118 == nullptr && o119 == nullptr + && o120 == nullptr && o121 == nullptr && o122 == nullptr && o123 == nullptr && o124 == nullptr + && o125 == nullptr && o126 == nullptr && o127 == nullptr && o128 == nullptr && o129 == nullptr + && o130 == nullptr && o131 == nullptr && o132 == nullptr && o133 == nullptr && o134 == nullptr + && o135 == nullptr && o136 == nullptr && o137 == nullptr && o138 == nullptr && o139 == nullptr + && o140 == nullptr && o141 == nullptr && o142 == nullptr && o143 == nullptr && o144 == nullptr + && o145 == nullptr && o146 == nullptr && o147 == nullptr && o148 == nullptr && o149 == nullptr + && o150 == nullptr && o151 == nullptr && o152 
== nullptr && o153 == nullptr && o154 == nullptr + && o155 == nullptr && o156 == nullptr && o157 == nullptr && o158 == nullptr && o159 == nullptr + && o160 == nullptr && o161 == nullptr && o162 == nullptr && o163 == nullptr && o164 == nullptr + && o165 == nullptr && o166 == nullptr && o167 == nullptr && o168 == nullptr && o169 == nullptr + && o170 == nullptr && o171 == nullptr && o172 == nullptr && o173 == nullptr && o174 == nullptr + && o175 == nullptr && o176 == nullptr && o177 == nullptr && o178 == nullptr && o179 == nullptr + && o180 == nullptr && o181 == nullptr && o182 == nullptr && o183 == nullptr && o184 == nullptr + && o185 == nullptr && o186 == nullptr && o187 == nullptr && o188 == nullptr && o189 == nullptr + && o190 == nullptr && o191 == nullptr && o192 == nullptr && o193 == nullptr && o194 == nullptr + && o195 == nullptr && o196 == nullptr && o197 == nullptr && o198 == nullptr && o199 == nullptr + && o200 == nullptr && o201 == nullptr && o202 == nullptr && o203 == nullptr && o204 == nullptr + && o205 == nullptr && o206 == nullptr && o207 == nullptr && o208 == nullptr && o209 == nullptr + && o210 == nullptr && o211 == nullptr && o212 == nullptr && o213 == nullptr && o214 == nullptr + && o215 == nullptr && o216 == nullptr && o217 == nullptr && o218 == nullptr && o219 == nullptr + && o220 == nullptr && o221 == nullptr && o222 == nullptr && o223 == nullptr && o224 == nullptr + && o225 == nullptr && o226 == nullptr && o227 == nullptr && o228 == nullptr && o229 == nullptr + && o230 == nullptr && o231 == nullptr && o232 == nullptr && o233 == nullptr && o234 == nullptr + && o235 == nullptr && o236 == nullptr && o237 == nullptr && o238 == nullptr && o239 == nullptr + && o240 == nullptr && o241 == nullptr && o242 == nullptr && o243 == nullptr && o244 == nullptr + && o245 == nullptr && o246 == nullptr && o247 == nullptr && o248 == nullptr && o249 == nullptr + && o250 == nullptr && o251 == nullptr && o252 == nullptr && o253 == nullptr); + } else { + EXPECT_EQ(0, 
env->GetArrayLength(reinterpret_cast(o0))); + EXPECT_EQ(1, env->GetArrayLength(reinterpret_cast(o1))); + EXPECT_EQ(2, env->GetArrayLength(reinterpret_cast(o2))); + EXPECT_EQ(3, env->GetArrayLength(reinterpret_cast(o3))); + EXPECT_EQ(4, env->GetArrayLength(reinterpret_cast(o4))); + EXPECT_EQ(5, env->GetArrayLength(reinterpret_cast(o5))); + EXPECT_EQ(6, env->GetArrayLength(reinterpret_cast(o6))); + EXPECT_EQ(7, env->GetArrayLength(reinterpret_cast(o7))); + EXPECT_EQ(8, env->GetArrayLength(reinterpret_cast(o8))); + EXPECT_EQ(9, env->GetArrayLength(reinterpret_cast(o9))); + EXPECT_EQ(10, env->GetArrayLength(reinterpret_cast(o10))); + EXPECT_EQ(11, env->GetArrayLength(reinterpret_cast(o11))); + EXPECT_EQ(12, env->GetArrayLength(reinterpret_cast(o12))); + EXPECT_EQ(13, env->GetArrayLength(reinterpret_cast(o13))); + EXPECT_EQ(14, env->GetArrayLength(reinterpret_cast(o14))); + EXPECT_EQ(15, env->GetArrayLength(reinterpret_cast(o15))); + EXPECT_EQ(16, env->GetArrayLength(reinterpret_cast(o16))); + EXPECT_EQ(17, env->GetArrayLength(reinterpret_cast(o17))); + EXPECT_EQ(18, env->GetArrayLength(reinterpret_cast(o18))); + EXPECT_EQ(19, env->GetArrayLength(reinterpret_cast(o19))); + EXPECT_EQ(20, env->GetArrayLength(reinterpret_cast(o20))); + EXPECT_EQ(21, env->GetArrayLength(reinterpret_cast(o21))); + EXPECT_EQ(22, env->GetArrayLength(reinterpret_cast(o22))); + EXPECT_EQ(23, env->GetArrayLength(reinterpret_cast(o23))); + EXPECT_EQ(24, env->GetArrayLength(reinterpret_cast(o24))); + EXPECT_EQ(25, env->GetArrayLength(reinterpret_cast(o25))); + EXPECT_EQ(26, env->GetArrayLength(reinterpret_cast(o26))); + EXPECT_EQ(27, env->GetArrayLength(reinterpret_cast(o27))); + EXPECT_EQ(28, env->GetArrayLength(reinterpret_cast(o28))); + EXPECT_EQ(29, env->GetArrayLength(reinterpret_cast(o29))); + EXPECT_EQ(30, env->GetArrayLength(reinterpret_cast(o30))); + EXPECT_EQ(31, env->GetArrayLength(reinterpret_cast(o31))); + EXPECT_EQ(32, env->GetArrayLength(reinterpret_cast(o32))); + EXPECT_EQ(33, 
env->GetArrayLength(reinterpret_cast(o33))); + EXPECT_EQ(34, env->GetArrayLength(reinterpret_cast(o34))); + EXPECT_EQ(35, env->GetArrayLength(reinterpret_cast(o35))); + EXPECT_EQ(36, env->GetArrayLength(reinterpret_cast(o36))); + EXPECT_EQ(37, env->GetArrayLength(reinterpret_cast(o37))); + EXPECT_EQ(38, env->GetArrayLength(reinterpret_cast(o38))); + EXPECT_EQ(39, env->GetArrayLength(reinterpret_cast(o39))); + EXPECT_EQ(40, env->GetArrayLength(reinterpret_cast(o40))); + EXPECT_EQ(41, env->GetArrayLength(reinterpret_cast(o41))); + EXPECT_EQ(42, env->GetArrayLength(reinterpret_cast(o42))); + EXPECT_EQ(43, env->GetArrayLength(reinterpret_cast(o43))); + EXPECT_EQ(44, env->GetArrayLength(reinterpret_cast(o44))); + EXPECT_EQ(45, env->GetArrayLength(reinterpret_cast(o45))); + EXPECT_EQ(46, env->GetArrayLength(reinterpret_cast(o46))); + EXPECT_EQ(47, env->GetArrayLength(reinterpret_cast(o47))); + EXPECT_EQ(48, env->GetArrayLength(reinterpret_cast(o48))); + EXPECT_EQ(49, env->GetArrayLength(reinterpret_cast(o49))); + EXPECT_EQ(50, env->GetArrayLength(reinterpret_cast(o50))); + EXPECT_EQ(51, env->GetArrayLength(reinterpret_cast(o51))); + EXPECT_EQ(52, env->GetArrayLength(reinterpret_cast(o52))); + EXPECT_EQ(53, env->GetArrayLength(reinterpret_cast(o53))); + EXPECT_EQ(54, env->GetArrayLength(reinterpret_cast(o54))); + EXPECT_EQ(55, env->GetArrayLength(reinterpret_cast(o55))); + EXPECT_EQ(56, env->GetArrayLength(reinterpret_cast(o56))); + EXPECT_EQ(57, env->GetArrayLength(reinterpret_cast(o57))); + EXPECT_EQ(58, env->GetArrayLength(reinterpret_cast(o58))); + EXPECT_EQ(59, env->GetArrayLength(reinterpret_cast(o59))); + EXPECT_EQ(60, env->GetArrayLength(reinterpret_cast(o60))); + EXPECT_EQ(61, env->GetArrayLength(reinterpret_cast(o61))); + EXPECT_EQ(62, env->GetArrayLength(reinterpret_cast(o62))); + EXPECT_EQ(63, env->GetArrayLength(reinterpret_cast(o63))); + EXPECT_EQ(64, env->GetArrayLength(reinterpret_cast(o64))); + EXPECT_EQ(65, env->GetArrayLength(reinterpret_cast(o65))); + 
EXPECT_EQ(66, env->GetArrayLength(reinterpret_cast(o66))); + EXPECT_EQ(67, env->GetArrayLength(reinterpret_cast(o67))); + EXPECT_EQ(68, env->GetArrayLength(reinterpret_cast(o68))); + EXPECT_EQ(69, env->GetArrayLength(reinterpret_cast(o69))); + EXPECT_EQ(70, env->GetArrayLength(reinterpret_cast(o70))); + EXPECT_EQ(71, env->GetArrayLength(reinterpret_cast(o71))); + EXPECT_EQ(72, env->GetArrayLength(reinterpret_cast(o72))); + EXPECT_EQ(73, env->GetArrayLength(reinterpret_cast(o73))); + EXPECT_EQ(74, env->GetArrayLength(reinterpret_cast(o74))); + EXPECT_EQ(75, env->GetArrayLength(reinterpret_cast(o75))); + EXPECT_EQ(76, env->GetArrayLength(reinterpret_cast(o76))); + EXPECT_EQ(77, env->GetArrayLength(reinterpret_cast(o77))); + EXPECT_EQ(78, env->GetArrayLength(reinterpret_cast(o78))); + EXPECT_EQ(79, env->GetArrayLength(reinterpret_cast(o79))); + EXPECT_EQ(80, env->GetArrayLength(reinterpret_cast(o80))); + EXPECT_EQ(81, env->GetArrayLength(reinterpret_cast(o81))); + EXPECT_EQ(82, env->GetArrayLength(reinterpret_cast(o82))); + EXPECT_EQ(83, env->GetArrayLength(reinterpret_cast(o83))); + EXPECT_EQ(84, env->GetArrayLength(reinterpret_cast(o84))); + EXPECT_EQ(85, env->GetArrayLength(reinterpret_cast(o85))); + EXPECT_EQ(86, env->GetArrayLength(reinterpret_cast(o86))); + EXPECT_EQ(87, env->GetArrayLength(reinterpret_cast(o87))); + EXPECT_EQ(88, env->GetArrayLength(reinterpret_cast(o88))); + EXPECT_EQ(89, env->GetArrayLength(reinterpret_cast(o89))); + EXPECT_EQ(90, env->GetArrayLength(reinterpret_cast(o90))); + EXPECT_EQ(91, env->GetArrayLength(reinterpret_cast(o91))); + EXPECT_EQ(92, env->GetArrayLength(reinterpret_cast(o92))); + EXPECT_EQ(93, env->GetArrayLength(reinterpret_cast(o93))); + EXPECT_EQ(94, env->GetArrayLength(reinterpret_cast(o94))); + EXPECT_EQ(95, env->GetArrayLength(reinterpret_cast(o95))); + EXPECT_EQ(96, env->GetArrayLength(reinterpret_cast(o96))); + EXPECT_EQ(97, env->GetArrayLength(reinterpret_cast(o97))); + EXPECT_EQ(98, 
env->GetArrayLength(reinterpret_cast(o98))); + EXPECT_EQ(99, env->GetArrayLength(reinterpret_cast(o99))); + EXPECT_EQ(100, env->GetArrayLength(reinterpret_cast(o100))); + EXPECT_EQ(101, env->GetArrayLength(reinterpret_cast(o101))); + EXPECT_EQ(102, env->GetArrayLength(reinterpret_cast(o102))); + EXPECT_EQ(103, env->GetArrayLength(reinterpret_cast(o103))); + EXPECT_EQ(104, env->GetArrayLength(reinterpret_cast(o104))); + EXPECT_EQ(105, env->GetArrayLength(reinterpret_cast(o105))); + EXPECT_EQ(106, env->GetArrayLength(reinterpret_cast(o106))); + EXPECT_EQ(107, env->GetArrayLength(reinterpret_cast(o107))); + EXPECT_EQ(108, env->GetArrayLength(reinterpret_cast(o108))); + EXPECT_EQ(109, env->GetArrayLength(reinterpret_cast(o109))); + EXPECT_EQ(110, env->GetArrayLength(reinterpret_cast(o110))); + EXPECT_EQ(111, env->GetArrayLength(reinterpret_cast(o111))); + EXPECT_EQ(112, env->GetArrayLength(reinterpret_cast(o112))); + EXPECT_EQ(113, env->GetArrayLength(reinterpret_cast(o113))); + EXPECT_EQ(114, env->GetArrayLength(reinterpret_cast(o114))); + EXPECT_EQ(115, env->GetArrayLength(reinterpret_cast(o115))); + EXPECT_EQ(116, env->GetArrayLength(reinterpret_cast(o116))); + EXPECT_EQ(117, env->GetArrayLength(reinterpret_cast(o117))); + EXPECT_EQ(118, env->GetArrayLength(reinterpret_cast(o118))); + EXPECT_EQ(119, env->GetArrayLength(reinterpret_cast(o119))); + EXPECT_EQ(120, env->GetArrayLength(reinterpret_cast(o120))); + EXPECT_EQ(121, env->GetArrayLength(reinterpret_cast(o121))); + EXPECT_EQ(122, env->GetArrayLength(reinterpret_cast(o122))); + EXPECT_EQ(123, env->GetArrayLength(reinterpret_cast(o123))); + EXPECT_EQ(124, env->GetArrayLength(reinterpret_cast(o124))); + EXPECT_EQ(125, env->GetArrayLength(reinterpret_cast(o125))); + EXPECT_EQ(126, env->GetArrayLength(reinterpret_cast(o126))); + EXPECT_EQ(127, env->GetArrayLength(reinterpret_cast(o127))); + EXPECT_EQ(128, env->GetArrayLength(reinterpret_cast(o128))); + EXPECT_EQ(129, env->GetArrayLength(reinterpret_cast(o129))); + 
EXPECT_EQ(130, env->GetArrayLength(reinterpret_cast(o130))); + EXPECT_EQ(131, env->GetArrayLength(reinterpret_cast(o131))); + EXPECT_EQ(132, env->GetArrayLength(reinterpret_cast(o132))); + EXPECT_EQ(133, env->GetArrayLength(reinterpret_cast(o133))); + EXPECT_EQ(134, env->GetArrayLength(reinterpret_cast(o134))); + EXPECT_EQ(135, env->GetArrayLength(reinterpret_cast(o135))); + EXPECT_EQ(136, env->GetArrayLength(reinterpret_cast(o136))); + EXPECT_EQ(137, env->GetArrayLength(reinterpret_cast(o137))); + EXPECT_EQ(138, env->GetArrayLength(reinterpret_cast(o138))); + EXPECT_EQ(139, env->GetArrayLength(reinterpret_cast(o139))); + EXPECT_EQ(140, env->GetArrayLength(reinterpret_cast(o140))); + EXPECT_EQ(141, env->GetArrayLength(reinterpret_cast(o141))); + EXPECT_EQ(142, env->GetArrayLength(reinterpret_cast(o142))); + EXPECT_EQ(143, env->GetArrayLength(reinterpret_cast(o143))); + EXPECT_EQ(144, env->GetArrayLength(reinterpret_cast(o144))); + EXPECT_EQ(145, env->GetArrayLength(reinterpret_cast(o145))); + EXPECT_EQ(146, env->GetArrayLength(reinterpret_cast(o146))); + EXPECT_EQ(147, env->GetArrayLength(reinterpret_cast(o147))); + EXPECT_EQ(148, env->GetArrayLength(reinterpret_cast(o148))); + EXPECT_EQ(149, env->GetArrayLength(reinterpret_cast(o149))); + EXPECT_EQ(150, env->GetArrayLength(reinterpret_cast(o150))); + EXPECT_EQ(151, env->GetArrayLength(reinterpret_cast(o151))); + EXPECT_EQ(152, env->GetArrayLength(reinterpret_cast(o152))); + EXPECT_EQ(153, env->GetArrayLength(reinterpret_cast(o153))); + EXPECT_EQ(154, env->GetArrayLength(reinterpret_cast(o154))); + EXPECT_EQ(155, env->GetArrayLength(reinterpret_cast(o155))); + EXPECT_EQ(156, env->GetArrayLength(reinterpret_cast(o156))); + EXPECT_EQ(157, env->GetArrayLength(reinterpret_cast(o157))); + EXPECT_EQ(158, env->GetArrayLength(reinterpret_cast(o158))); + EXPECT_EQ(159, env->GetArrayLength(reinterpret_cast(o159))); + EXPECT_EQ(160, env->GetArrayLength(reinterpret_cast(o160))); + EXPECT_EQ(161, 
env->GetArrayLength(reinterpret_cast(o161))); + EXPECT_EQ(162, env->GetArrayLength(reinterpret_cast(o162))); + EXPECT_EQ(163, env->GetArrayLength(reinterpret_cast(o163))); + EXPECT_EQ(164, env->GetArrayLength(reinterpret_cast(o164))); + EXPECT_EQ(165, env->GetArrayLength(reinterpret_cast(o165))); + EXPECT_EQ(166, env->GetArrayLength(reinterpret_cast(o166))); + EXPECT_EQ(167, env->GetArrayLength(reinterpret_cast(o167))); + EXPECT_EQ(168, env->GetArrayLength(reinterpret_cast(o168))); + EXPECT_EQ(169, env->GetArrayLength(reinterpret_cast(o169))); + EXPECT_EQ(170, env->GetArrayLength(reinterpret_cast(o170))); + EXPECT_EQ(171, env->GetArrayLength(reinterpret_cast(o171))); + EXPECT_EQ(172, env->GetArrayLength(reinterpret_cast(o172))); + EXPECT_EQ(173, env->GetArrayLength(reinterpret_cast(o173))); + EXPECT_EQ(174, env->GetArrayLength(reinterpret_cast(o174))); + EXPECT_EQ(175, env->GetArrayLength(reinterpret_cast(o175))); + EXPECT_EQ(176, env->GetArrayLength(reinterpret_cast(o176))); + EXPECT_EQ(177, env->GetArrayLength(reinterpret_cast(o177))); + EXPECT_EQ(178, env->GetArrayLength(reinterpret_cast(o178))); + EXPECT_EQ(179, env->GetArrayLength(reinterpret_cast(o179))); + EXPECT_EQ(180, env->GetArrayLength(reinterpret_cast(o180))); + EXPECT_EQ(181, env->GetArrayLength(reinterpret_cast(o181))); + EXPECT_EQ(182, env->GetArrayLength(reinterpret_cast(o182))); + EXPECT_EQ(183, env->GetArrayLength(reinterpret_cast(o183))); + EXPECT_EQ(184, env->GetArrayLength(reinterpret_cast(o184))); + EXPECT_EQ(185, env->GetArrayLength(reinterpret_cast(o185))); + EXPECT_EQ(186, env->GetArrayLength(reinterpret_cast(o186))); + EXPECT_EQ(187, env->GetArrayLength(reinterpret_cast(o187))); + EXPECT_EQ(188, env->GetArrayLength(reinterpret_cast(o188))); + EXPECT_EQ(189, env->GetArrayLength(reinterpret_cast(o189))); + EXPECT_EQ(190, env->GetArrayLength(reinterpret_cast(o190))); + EXPECT_EQ(191, env->GetArrayLength(reinterpret_cast(o191))); + EXPECT_EQ(192, env->GetArrayLength(reinterpret_cast(o192))); 
+ EXPECT_EQ(193, env->GetArrayLength(reinterpret_cast(o193))); + EXPECT_EQ(194, env->GetArrayLength(reinterpret_cast(o194))); + EXPECT_EQ(195, env->GetArrayLength(reinterpret_cast(o195))); + EXPECT_EQ(196, env->GetArrayLength(reinterpret_cast(o196))); + EXPECT_EQ(197, env->GetArrayLength(reinterpret_cast(o197))); + EXPECT_EQ(198, env->GetArrayLength(reinterpret_cast(o198))); + EXPECT_EQ(199, env->GetArrayLength(reinterpret_cast(o199))); + EXPECT_EQ(200, env->GetArrayLength(reinterpret_cast(o200))); + EXPECT_EQ(201, env->GetArrayLength(reinterpret_cast(o201))); + EXPECT_EQ(202, env->GetArrayLength(reinterpret_cast(o202))); + EXPECT_EQ(203, env->GetArrayLength(reinterpret_cast(o203))); + EXPECT_EQ(204, env->GetArrayLength(reinterpret_cast(o204))); + EXPECT_EQ(205, env->GetArrayLength(reinterpret_cast(o205))); + EXPECT_EQ(206, env->GetArrayLength(reinterpret_cast(o206))); + EXPECT_EQ(207, env->GetArrayLength(reinterpret_cast(o207))); + EXPECT_EQ(208, env->GetArrayLength(reinterpret_cast(o208))); + EXPECT_EQ(209, env->GetArrayLength(reinterpret_cast(o209))); + EXPECT_EQ(210, env->GetArrayLength(reinterpret_cast(o210))); + EXPECT_EQ(211, env->GetArrayLength(reinterpret_cast(o211))); + EXPECT_EQ(212, env->GetArrayLength(reinterpret_cast(o212))); + EXPECT_EQ(213, env->GetArrayLength(reinterpret_cast(o213))); + EXPECT_EQ(214, env->GetArrayLength(reinterpret_cast(o214))); + EXPECT_EQ(215, env->GetArrayLength(reinterpret_cast(o215))); + EXPECT_EQ(216, env->GetArrayLength(reinterpret_cast(o216))); + EXPECT_EQ(217, env->GetArrayLength(reinterpret_cast(o217))); + EXPECT_EQ(218, env->GetArrayLength(reinterpret_cast(o218))); + EXPECT_EQ(219, env->GetArrayLength(reinterpret_cast(o219))); + EXPECT_EQ(220, env->GetArrayLength(reinterpret_cast(o220))); + EXPECT_EQ(221, env->GetArrayLength(reinterpret_cast(o221))); + EXPECT_EQ(222, env->GetArrayLength(reinterpret_cast(o222))); + EXPECT_EQ(223, env->GetArrayLength(reinterpret_cast(o223))); + EXPECT_EQ(224, 
env->GetArrayLength(reinterpret_cast(o224))); + EXPECT_EQ(225, env->GetArrayLength(reinterpret_cast(o225))); + EXPECT_EQ(226, env->GetArrayLength(reinterpret_cast(o226))); + EXPECT_EQ(227, env->GetArrayLength(reinterpret_cast(o227))); + EXPECT_EQ(228, env->GetArrayLength(reinterpret_cast(o228))); + EXPECT_EQ(229, env->GetArrayLength(reinterpret_cast(o229))); + EXPECT_EQ(230, env->GetArrayLength(reinterpret_cast(o230))); + EXPECT_EQ(231, env->GetArrayLength(reinterpret_cast(o231))); + EXPECT_EQ(232, env->GetArrayLength(reinterpret_cast(o232))); + EXPECT_EQ(233, env->GetArrayLength(reinterpret_cast(o233))); + EXPECT_EQ(234, env->GetArrayLength(reinterpret_cast(o234))); + EXPECT_EQ(235, env->GetArrayLength(reinterpret_cast(o235))); + EXPECT_EQ(236, env->GetArrayLength(reinterpret_cast(o236))); + EXPECT_EQ(237, env->GetArrayLength(reinterpret_cast(o237))); + EXPECT_EQ(238, env->GetArrayLength(reinterpret_cast(o238))); + EXPECT_EQ(239, env->GetArrayLength(reinterpret_cast(o239))); + EXPECT_EQ(240, env->GetArrayLength(reinterpret_cast(o240))); + EXPECT_EQ(241, env->GetArrayLength(reinterpret_cast(o241))); + EXPECT_EQ(242, env->GetArrayLength(reinterpret_cast(o242))); + EXPECT_EQ(243, env->GetArrayLength(reinterpret_cast(o243))); + EXPECT_EQ(244, env->GetArrayLength(reinterpret_cast(o244))); + EXPECT_EQ(245, env->GetArrayLength(reinterpret_cast(o245))); + EXPECT_EQ(246, env->GetArrayLength(reinterpret_cast(o246))); + EXPECT_EQ(247, env->GetArrayLength(reinterpret_cast(o247))); + EXPECT_EQ(248, env->GetArrayLength(reinterpret_cast(o248))); + EXPECT_EQ(249, env->GetArrayLength(reinterpret_cast(o249))); + EXPECT_EQ(250, env->GetArrayLength(reinterpret_cast(o250))); + EXPECT_EQ(251, env->GetArrayLength(reinterpret_cast(o251))); + EXPECT_EQ(252, env->GetArrayLength(reinterpret_cast(o252))); + EXPECT_EQ(253, env->GetArrayLength(reinterpret_cast(o253))); + } +} + +const char* longSig = + 
"(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + 
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + 
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)V"; + +void JniCompilerTest::MaxParamNumberImpl() { + SetUpForTest(false, "maxParamNumber", longSig, + CURRENT_JNI_WRAPPER(Java_MyClassNatives_maxParamNumber)); + + jvalue args[254]; + + // First test: test with all arguments null. + for (int i = 0; i < 254; ++i) { + args[i].l = nullptr; + } + + env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args); + + // Second test: test with int[] objects with increasing lengths + for (int i = 0; i < 254; ++i) { + jintArray tmp = env_->NewIntArray(i); + args[i].l = tmp; + EXPECT_NE(args[i].l, nullptr); + } + + env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args); +} + +JNI_TEST(MaxParamNumber) + +void JniCompilerTest::WithoutImplementationImpl() { + // This will lead to error messages in the log. 
+ ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(false, "withoutImplementation", "()V", NORMAL_OR_FAST_JNI_ONLY_NULLPTR); + + env_->CallVoidMethod(jobj_, jmethod_); + + EXPECT_TRUE(Thread::Current()->IsExceptionPending()); + EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); +} + +JNI_TEST(WithoutImplementation) + +void JniCompilerTest::WithoutImplementationRefReturnImpl() { + // This will lead to error messages in the log. + ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(false, + "withoutImplementationRefReturn", + "()Ljava/lang/Object;", + NORMAL_OR_FAST_JNI_ONLY_NULLPTR); + + env_->CallObjectMethod(jobj_, jmethod_); + + EXPECT_TRUE(Thread::Current()->IsExceptionPending()); + EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); +} + +JNI_TEST(WithoutImplementationRefReturn) + +void JniCompilerTest::StaticWithoutImplementationImpl() { + // This will lead to error messages in the log. + ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(true, "staticWithoutImplementation", "()V", nullptr); + + env_->CallStaticVoidMethod(jklass_, jmethod_); + + EXPECT_TRUE(Thread::Current()->IsExceptionPending()); + EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); +} + +JNI_TEST_CRITICAL(StaticWithoutImplementation) + +void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3, + jint i4, jint i5, jint i6, jint i7, jint i8, jint i9, + jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4, + jfloat f5, jfloat f6, jfloat f7, jfloat f8, jfloat f9, + jfloat f10) { + EXPECT_EQ(i1, 1); + EXPECT_EQ(i2, 2); + EXPECT_EQ(i3, 3); + EXPECT_EQ(i4, 4); + EXPECT_EQ(i5, 5); + EXPECT_EQ(i6, 6); + EXPECT_EQ(i7, 7); + EXPECT_EQ(i8, 8); + EXPECT_EQ(i9, 9); + EXPECT_EQ(i10, 10); + + jint i11 = bit_cast(f1); + EXPECT_EQ(i11, 11); + jint i12 = bit_cast(f2); + EXPECT_EQ(i12, 12); + jint i13 = bit_cast(f3); + EXPECT_EQ(i13, 13); + jint i14 = bit_cast(f4); + EXPECT_EQ(i14, 14); + jint i15 = bit_cast(f5); + EXPECT_EQ(i15, 15); + jint i16 = bit_cast(f6); 
+ EXPECT_EQ(i16, 16); + jint i17 = bit_cast(f7); + EXPECT_EQ(i17, 17); + jint i18 = bit_cast(f8); + EXPECT_EQ(i18, 18); + jint i19 = bit_cast(f9); + EXPECT_EQ(i19, 19); + jint i20 = bit_cast(f10); + EXPECT_EQ(i20, 20); +} + +void JniCompilerTest::StackArgsIntsFirstImpl() { + SetUpForTest(true, "stackArgsIntsFirst", "(IIIIIIIIIIFFFFFFFFFF)V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsIntsFirst)); + + jint i1 = 1; + jint i2 = 2; + jint i3 = 3; + jint i4 = 4; + jint i5 = 5; + jint i6 = 6; + jint i7 = 7; + jint i8 = 8; + jint i9 = 9; + jint i10 = 10; + + jfloat f1 = bit_cast(11); + jfloat f2 = bit_cast(12); + jfloat f3 = bit_cast(13); + jfloat f4 = bit_cast(14); + jfloat f5 = bit_cast(15); + jfloat f6 = bit_cast(16); + jfloat f7 = bit_cast(17); + jfloat f8 = bit_cast(18); + jfloat f9 = bit_cast(19); + jfloat f10 = bit_cast(20); + + env_->CallStaticVoidMethod(jklass_, jmethod_, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, f1, f2, + f3, f4, f5, f6, f7, f8, f9, f10); +} + +JNI_TEST_CRITICAL(StackArgsIntsFirst) + +void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat f2, + jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7, + jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2, + jint i3, jint i4, jint i5, jint i6, jint i7, jint i8, + jint i9, jint i10) { + EXPECT_EQ(i1, 1); + EXPECT_EQ(i2, 2); + EXPECT_EQ(i3, 3); + EXPECT_EQ(i4, 4); + EXPECT_EQ(i5, 5); + EXPECT_EQ(i6, 6); + EXPECT_EQ(i7, 7); + EXPECT_EQ(i8, 8); + EXPECT_EQ(i9, 9); + EXPECT_EQ(i10, 10); + + jint i11 = bit_cast(f1); + EXPECT_EQ(i11, 11); + jint i12 = bit_cast(f2); + EXPECT_EQ(i12, 12); + jint i13 = bit_cast(f3); + EXPECT_EQ(i13, 13); + jint i14 = bit_cast(f4); + EXPECT_EQ(i14, 14); + jint i15 = bit_cast(f5); + EXPECT_EQ(i15, 15); + jint i16 = bit_cast(f6); + EXPECT_EQ(i16, 16); + jint i17 = bit_cast(f7); + EXPECT_EQ(i17, 17); + jint i18 = bit_cast(f8); + EXPECT_EQ(i18, 18); + jint i19 = bit_cast(f9); + EXPECT_EQ(i19, 19); + jint i20 = bit_cast(f10); + EXPECT_EQ(i20, 20); +} 
+ +void JniCompilerTest::StackArgsFloatsFirstImpl() { + SetUpForTest(true, "stackArgsFloatsFirst", "(FFFFFFFFFFIIIIIIIIII)V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsFloatsFirst)); + + jint i1 = 1; + jint i2 = 2; + jint i3 = 3; + jint i4 = 4; + jint i5 = 5; + jint i6 = 6; + jint i7 = 7; + jint i8 = 8; + jint i9 = 9; + jint i10 = 10; + + jfloat f1 = bit_cast(11); + jfloat f2 = bit_cast(12); + jfloat f3 = bit_cast(13); + jfloat f4 = bit_cast(14); + jfloat f5 = bit_cast(15); + jfloat f6 = bit_cast(16); + jfloat f7 = bit_cast(17); + jfloat f8 = bit_cast(18); + jfloat f9 = bit_cast(19); + jfloat f10 = bit_cast(20); + + env_->CallStaticVoidMethod(jklass_, jmethod_, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, i1, i2, i3, + i4, i5, i6, i7, i8, i9, i10); +} + +JNI_TEST_CRITICAL(StackArgsFloatsFirst) + +void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass, jint i1, jfloat f1, jint i2, + jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5, + jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8, + jfloat f8, jint i9, jfloat f9, jint i10, jfloat f10) { + EXPECT_EQ(i1, 1); + EXPECT_EQ(i2, 2); + EXPECT_EQ(i3, 3); + EXPECT_EQ(i4, 4); + EXPECT_EQ(i5, 5); + EXPECT_EQ(i6, 6); + EXPECT_EQ(i7, 7); + EXPECT_EQ(i8, 8); + EXPECT_EQ(i9, 9); + EXPECT_EQ(i10, 10); + + jint i11 = bit_cast(f1); + EXPECT_EQ(i11, 11); + jint i12 = bit_cast(f2); + EXPECT_EQ(i12, 12); + jint i13 = bit_cast(f3); + EXPECT_EQ(i13, 13); + jint i14 = bit_cast(f4); + EXPECT_EQ(i14, 14); + jint i15 = bit_cast(f5); + EXPECT_EQ(i15, 15); + jint i16 = bit_cast(f6); + EXPECT_EQ(i16, 16); + jint i17 = bit_cast(f7); + EXPECT_EQ(i17, 17); + jint i18 = bit_cast(f8); + EXPECT_EQ(i18, 18); + jint i19 = bit_cast(f9); + EXPECT_EQ(i19, 19); + jint i20 = bit_cast(f10); + EXPECT_EQ(i20, 20); +} + +void JniCompilerTest::StackArgsMixedImpl() { + SetUpForTest(true, "stackArgsMixed", "(IFIFIFIFIFIFIFIFIFIF)V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsMixed)); + + jint i1 = 1; + jint i2 = 2; + jint i3 = 3; 
+ jint i4 = 4; + jint i5 = 5; + jint i6 = 6; + jint i7 = 7; + jint i8 = 8; + jint i9 = 9; + jint i10 = 10; + + jfloat f1 = bit_cast(11); + jfloat f2 = bit_cast(12); + jfloat f3 = bit_cast(13); + jfloat f4 = bit_cast(14); + jfloat f5 = bit_cast(15); + jfloat f6 = bit_cast(16); + jfloat f7 = bit_cast(17); + jfloat f8 = bit_cast(18); + jfloat f9 = bit_cast(19); + jfloat f10 = bit_cast(20); + + env_->CallStaticVoidMethod(jklass_, jmethod_, i1, f1, i2, f2, i3, f3, i4, f4, i5, f5, i6, f6, i7, + f7, i8, f8, i9, f9, i10, f10); +} + +JNI_TEST_CRITICAL(StackArgsMixed) + +void Java_MyClassNatives_normalNative(JNIEnv*, jclass) { + // Intentionally left empty. +} + +// Methods not annotated with anything are not considered "fast native" +// -- Check that the annotation lookup does not find it. +void JniCompilerTest::NormalNativeImpl() { + SetUpForTest(/* direct= */ true, + "normalNative", + "()V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative)); + + ArtMethod* method = jni::DecodeArtMethod(jmethod_); + ASSERT_TRUE(method != nullptr); + + EXPECT_FALSE(method->IsCriticalNative()); + EXPECT_FALSE(method->IsFastNative()); +} + +// TODO: just rename the java functions to the standard convention and remove duplicated tests +JNI_TEST_NORMAL_ONLY(NormalNative) + +// Methods annotated with @FastNative are considered "fast native" +// -- Check that the annotation lookup succeeds. +void Java_MyClassNatives_fastNative(JNIEnv*, jclass) { + // Intentionally left empty. 
+} + +void JniCompilerTest::FastNativeImpl() { + SetUpForTest(/* direct= */ true, + "fastNative", + "()V", + CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative)); + + ArtMethod* method = jni::DecodeArtMethod(jmethod_); + ASSERT_TRUE(method != nullptr); + + EXPECT_FALSE(method->IsCriticalNative()); + EXPECT_TRUE(method->IsFastNative()); +} + +// TODO: just rename the java functions to the standard convention and remove duplicated tests +JNI_TEST_NORMAL_ONLY(FastNative) + +int gJava_myClassNatives_criticalNative_calls[kJniKindCount] = {}; +// Methods annotated with @CriticalNative are considered "critical native" +// -- Check that the annotation lookup succeeds. +void Java_MyClassNatives_criticalNative() { + gJava_myClassNatives_criticalNative_calls[gCurrentJni]++; +} + +void JniCompilerTest::CriticalNativeImpl() { + SetUpForTest(/* direct= */ true, + // Important: Don't change the "current jni" yet to avoid a method name suffix. + "criticalNative", + "()V", + // TODO: Use CURRENT_JNI_WRAPPER instead which is more generic. + reinterpret_cast(&Java_MyClassNatives_criticalNative)); + + // TODO: remove this manual updating of the current JNI. Merge with the other tests. 
+ UpdateCurrentJni(JniKind::kCritical); + ASSERT_TRUE(IsCurrentJniCritical()); + + ArtMethod* method = jni::DecodeArtMethod(jmethod_); + ASSERT_TRUE(method != nullptr); + + EXPECT_TRUE(method->IsCriticalNative()); + EXPECT_FALSE(method->IsFastNative()); + + EXPECT_EQ(0, gJava_myClassNatives_criticalNative_calls[gCurrentJni]); + env_->CallStaticVoidMethod(jklass_, jmethod_); + EXPECT_EQ(1, gJava_myClassNatives_criticalNative_calls[gCurrentJni]); + + gJava_myClassNatives_criticalNative_calls[gCurrentJni] = 0; +} + +// TODO: just rename the java functions to the standard convention and remove duplicated tests +JNI_TEST_NORMAL_ONLY(CriticalNative) + +} // namespace art diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc new file mode 100644 index 0000000..e06c914 --- /dev/null +++ b/compiler/jni/quick/arm/calling_convention_arm.cc @@ -0,0 +1,552 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "calling_convention_arm.h" + +#include + +#include "arch/arm/jni_frame_arm.h" +#include "arch/instruction_set.h" +#include "base/macros.h" +#include "handle_scope-inl.h" +#include "utils/arm/managed_register_arm.h" + +namespace art { +namespace arm { + +static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size"); + +// +// JNI calling convention constants. +// + +// List of parameters passed via registers for JNI. 
+// JNI uses soft-float, so there is only a GPR list. +static const Register kJniArgumentRegisters[] = { + R0, R1, R2, R3 +}; + +static_assert(kJniArgumentRegisterCount == arraysize(kJniArgumentRegisters)); + +// +// Managed calling convention constants. +// + +// Used by hard float. (General purpose registers.) +static const Register kHFCoreArgumentRegisters[] = { + R0, R1, R2, R3 +}; + +// (VFP single-precision registers.) +static const SRegister kHFSArgumentRegisters[] = { + S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15 +}; + +// (VFP double-precision registers.) +static const DRegister kHFDArgumentRegisters[] = { + D0, D1, D2, D3, D4, D5, D6, D7 +}; + +static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters), + "ks d argument registers mismatch"); + +// +// Shared managed+JNI calling convention constants. +// + +static constexpr ManagedRegister kCalleeSaveRegisters[] = { + // Core registers. + ArmManagedRegister::FromCoreRegister(R5), + ArmManagedRegister::FromCoreRegister(R6), + ArmManagedRegister::FromCoreRegister(R7), + ArmManagedRegister::FromCoreRegister(R8), + ArmManagedRegister::FromCoreRegister(R10), + ArmManagedRegister::FromCoreRegister(R11), + ArmManagedRegister::FromCoreRegister(LR), + // Hard float registers. 
+ ArmManagedRegister::FromSRegister(S16), + ArmManagedRegister::FromSRegister(S17), + ArmManagedRegister::FromSRegister(S18), + ArmManagedRegister::FromSRegister(S19), + ArmManagedRegister::FromSRegister(S20), + ArmManagedRegister::FromSRegister(S21), + ArmManagedRegister::FromSRegister(S22), + ArmManagedRegister::FromSRegister(S23), + ArmManagedRegister::FromSRegister(S24), + ArmManagedRegister::FromSRegister(S25), + ArmManagedRegister::FromSRegister(S26), + ArmManagedRegister::FromSRegister(S27), + ArmManagedRegister::FromSRegister(S28), + ArmManagedRegister::FromSRegister(S29), + ArmManagedRegister::FromSRegister(S30), + ArmManagedRegister::FromSRegister(S31) +}; + +template +static constexpr uint32_t CalculateCoreCalleeSpillMask( + const ManagedRegister (&callee_saves)[size]) { + // LR is a special callee save which is not reported by CalleeSaveRegisters(). + uint32_t result = 0u; + for (auto&& r : callee_saves) { + if (r.AsArm().IsCoreRegister()) { + result |= (1u << r.AsArm().AsCoreRegister()); + } + } + return result; +} + +template +static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) { + uint32_t result = 0u; + for (auto&& r : callee_saves) { + if (r.AsArm().IsSRegister()) { + result |= (1u << r.AsArm().AsSRegister()); + } + } + return result; +} + +static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters); +static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters); + +static constexpr ManagedRegister kAapcsCalleeSaveRegisters[] = { + // Core registers. + ArmManagedRegister::FromCoreRegister(R4), + ArmManagedRegister::FromCoreRegister(R5), + ArmManagedRegister::FromCoreRegister(R6), + ArmManagedRegister::FromCoreRegister(R7), + ArmManagedRegister::FromCoreRegister(R8), + ArmManagedRegister::FromCoreRegister(R9), // The platform register is callee-save on Android. 
+ ArmManagedRegister::FromCoreRegister(R10), + ArmManagedRegister::FromCoreRegister(R11), + ArmManagedRegister::FromCoreRegister(LR), + // Hard float registers. + ArmManagedRegister::FromSRegister(S16), + ArmManagedRegister::FromSRegister(S17), + ArmManagedRegister::FromSRegister(S18), + ArmManagedRegister::FromSRegister(S19), + ArmManagedRegister::FromSRegister(S20), + ArmManagedRegister::FromSRegister(S21), + ArmManagedRegister::FromSRegister(S22), + ArmManagedRegister::FromSRegister(S23), + ArmManagedRegister::FromSRegister(S24), + ArmManagedRegister::FromSRegister(S25), + ArmManagedRegister::FromSRegister(S26), + ArmManagedRegister::FromSRegister(S27), + ArmManagedRegister::FromSRegister(S28), + ArmManagedRegister::FromSRegister(S29), + ArmManagedRegister::FromSRegister(S30), + ArmManagedRegister::FromSRegister(S31) +}; + +static constexpr uint32_t kAapcsCoreCalleeSpillMask = + CalculateCoreCalleeSpillMask(kAapcsCalleeSaveRegisters); +static constexpr uint32_t kAapcsFpCalleeSpillMask = + CalculateFpCalleeSpillMask(kAapcsCalleeSaveRegisters); + +// Calling convention + +ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() const { + return ArmManagedRegister::FromCoreRegister(IP); // R12 +} + +ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() const { + return ArmManagedRegister::FromCoreRegister(IP); // R12 +} + +ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() { + switch (GetShorty()[0]) { + case 'V': + return ArmManagedRegister::NoRegister(); + case 'D': + return ArmManagedRegister::FromDRegister(D0); + case 'F': + return ArmManagedRegister::FromSRegister(S0); + case 'J': + return ArmManagedRegister::FromRegisterPair(R0_R1); + default: + return ArmManagedRegister::FromCoreRegister(R0); + } +} + +ManagedRegister ArmJniCallingConvention::ReturnRegister() { + switch (GetShorty()[0]) { + case 'V': + return ArmManagedRegister::NoRegister(); + case 'D': + case 'J': + return 
ArmManagedRegister::FromRegisterPair(R0_R1); + default: + return ArmManagedRegister::FromCoreRegister(R0); + } +} + +ManagedRegister ArmJniCallingConvention::IntReturnRegister() { + return ArmManagedRegister::FromCoreRegister(R0); +} + +// Managed runtime calling convention + +ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() { + return ArmManagedRegister::FromCoreRegister(R0); +} + +bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() { + return false; // Everything moved to stack on entry. +} + +bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() { + return true; +} + +ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() { + LOG(FATAL) << "Should not reach here"; + UNREACHABLE(); +} + +FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + return FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method* + (itr_slots_ * kFramePointerSize)); // offset into in args +} + +const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on ARM to free them up for scratch use, we then assume + // all arguments are on the stack. + if ((entry_spills_.size() == 0) && (NumArgs() > 0)) { + uint32_t gpr_index = 1; // R0 ~ R3. Reserve r0 for ArtMethod*. + uint32_t fpr_index = 0; // S0 ~ S15. + uint32_t fpr_double_index = 0; // D0 ~ D7. + + ResetIterator(FrameOffset(0)); + while (HasNext()) { + if (IsCurrentParamAFloatOrDouble()) { + if (IsCurrentParamADouble()) { // Double. + // Double should not overlap with float. + fpr_double_index = (std::max(fpr_double_index * 2, RoundUp(fpr_index, 2))) / 2; + if (fpr_double_index < arraysize(kHFDArgumentRegisters)) { + entry_spills_.push_back( + ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[fpr_double_index++])); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 8); + } + } else { // Float. 
+ // Float should not overlap with double. + if (fpr_index % 2 == 0) { + fpr_index = std::max(fpr_double_index * 2, fpr_index); + } + if (fpr_index < arraysize(kHFSArgumentRegisters)) { + entry_spills_.push_back( + ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[fpr_index++])); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } + } + } else { + // FIXME: Pointer this returns as both reference and long. + if (IsCurrentParamALong() && !IsCurrentParamAReference()) { // Long. + if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) { + // Skip R1, and use R2_R3 if the long is the first parameter. + if (gpr_index == 1) { + gpr_index++; + } + } + + // If it spans register and memory, we must use the value in memory. + if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) { + entry_spills_.push_back( + ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++])); + } else if (gpr_index == arraysize(kHFCoreArgumentRegisters) - 1) { + gpr_index++; + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } + } + // High part of long or 32-bit argument. + if (gpr_index < arraysize(kHFCoreArgumentRegisters)) { + entry_spills_.push_back( + ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++])); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } + } + Next(); + } + } + return entry_spills_; +} + +// JNI calling convention + +ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty) + : JniCallingConvention(is_static, + is_synchronized, + is_critical_native, + shorty, + kArmPointerSize) { + // AAPCS 4.1 specifies fundamental alignments for each type. All of our stack arguments are + // usually 4-byte aligned, however longs and doubles must be 8 bytes aligned. Add padding to + // maintain 8-byte alignment invariant. 
+ // + // Compute padding to ensure longs and doubles are not split in AAPCS. + size_t shift = 0; + + size_t cur_arg, cur_reg; + if (LIKELY(HasExtraArgumentsForJni())) { + // Ignore the 'this' jobject or jclass for static methods and the JNIEnv. + // We start at the aligned register r2. + // + // Ignore the first 2 parameters because they are guaranteed to be aligned. + cur_arg = NumImplicitArgs(); // skip the "this" arg. + cur_reg = 2; // skip {r0=JNIEnv, r1=jobject} / {r0=JNIEnv, r1=jclass} parameters (start at r2). + } else { + // Check every parameter. + cur_arg = 0; + cur_reg = 0; + } + + // TODO: Maybe should just use IsCurrentParamALongOrDouble instead to be cleaner? + // (this just seems like an unnecessary micro-optimization). + + // Shift across a logical register mapping that looks like: + // + // | r0 | r1 | r2 | r3 | SP | SP+4| SP+8 | SP+12 | ... | SP+n | SP+n+4 | + // + // (where SP is some arbitrary stack pointer that our 0th stack arg would go into). + // + // Any time there would normally be a long/double in an odd logical register, + // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment. + // + // This works for both physical register pairs {r0, r1}, {r2, r3} and for when + // the value is on the stack. + // + // For example: + // (a) long would normally go into r1, but we shift it into r2 + // | INT | (PAD) | LONG | + // | r0 | r1 | r2 | r3 | + // + // (b) long would normally go into r3, but we shift it into SP + // | INT | INT | INT | (PAD) | LONG | + // | r0 | r1 | r2 | r3 | SP+4 SP+8| + // + // where INT is any <=4 byte arg, and LONG is any 8-byte arg. 
+ for (; cur_arg < NumArgs(); cur_arg++) { + if (IsParamALongOrDouble(cur_arg)) { + if ((cur_reg & 1) != 0) { // check that it's in a logical contiguous register pair + shift += 4; + cur_reg++; // additional bump to ensure alignment + } + cur_reg += 2; // bump the iterator twice for every long argument + } else { + cur_reg++; // bump the iterator for every non-long argument + } + } + + if (cur_reg <= kJniArgumentRegisterCount) { + // As a special case when, as a result of shifting (or not) there are no arguments on the stack, + // we actually have 0 stack padding. + // + // For example with @CriticalNative and: + // (int, long) -> shifts the long but doesn't need to pad the stack + // + // shift + // \/ + // | INT | (PAD) | LONG | (EMPTY) ... + // | r0 | r1 | r2 | r3 | SP ... + // /\ + // no stack padding + padding_ = 0; + } else { + padding_ = shift; + } + + // TODO: add some new JNI tests for @CriticalNative that introduced new edge cases + // (a) Using r0,r1 pair = f(long,...) + // (b) Shifting r1 long into r2,r3 pair = f(int, long, int, ...); + // (c) Shifting but not introducing a stack padding = f(int, long); +} + +uint32_t ArmJniCallingConvention::CoreSpillMask() const { + // Compute spill mask to agree with callee saves initialized in the constructor + return is_critical_native_ ? 0u : kCoreCalleeSpillMask; +} + +uint32_t ArmJniCallingConvention::FpSpillMask() const { + return is_critical_native_ ? 0u : kFpCalleeSpillMask; +} + +ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const { + return ArmManagedRegister::FromCoreRegister(R2); +} + +size_t ArmJniCallingConvention::FrameSize() const { + if (UNLIKELY(is_critical_native_)) { + CHECK(!SpillsMethod()); + CHECK(!HasLocalReferenceSegmentState()); + CHECK(!HasHandleScope()); + CHECK(!SpillsReturnValue()); + return 0u; // There is no managed frame for @CriticalNative. 
+ } + + // Method*, callee save area size, local reference segment state + CHECK(SpillsMethod()); + const size_t method_ptr_size = static_cast(kArmPointerSize); + const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize; + size_t total_size = method_ptr_size + callee_save_area_size; + + CHECK(HasLocalReferenceSegmentState()); + // local reference segment state + total_size += kFramePointerSize; + // TODO: Probably better to use sizeof(IRTSegmentState) here... + + CHECK(HasHandleScope()); + total_size += HandleScope::SizeOf(kArmPointerSize, ReferenceCount()); + + // Plus return value spill area size + CHECK(SpillsReturnValue()); + total_size += SizeOfReturnValue(); + + return RoundUp(total_size, kStackAlignment); +} + +size_t ArmJniCallingConvention::OutArgSize() const { + // Count param args, including JNIEnv* and jclass*; count 8-byte args twice. + size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs(); + // Account for arguments passed through r0-r3. (No FP args, AAPCS32 is soft-float.) + size_t stack_args = all_args - std::min(kJniArgumentRegisterCount, all_args); + // The size of outgoing arguments. + size_t size = stack_args * kFramePointerSize + padding_; + + // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS. + static_assert((kCoreCalleeSpillMask & ~kAapcsCoreCalleeSpillMask) == 0u); + static_assert((kFpCalleeSpillMask & ~kAapcsFpCalleeSpillMask) == 0u); + + // For @CriticalNative, we can make a tail call if there are no stack args and the + // return type is not an FP type (otherwise we need to move the result to FP register). + DCHECK(!RequiresSmallResultTypeExtension()); + if (is_critical_native_ && (size != 0u || GetShorty()[0] == 'F' || GetShorty()[0] == 'D')) { + size += kFramePointerSize; // We need to spill LR with the args. 
+ } + size_t out_args_size = RoundUp(size, kAapcsStackAlignment); + if (UNLIKELY(IsCriticalNative())) { + DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u)); + } + return out_args_size; +} + +ArrayRef ArmJniCallingConvention::CalleeSaveRegisters() const { + if (UNLIKELY(IsCriticalNative())) { + if (UseTailCall()) { + return ArrayRef(); // Do not spill anything. + } else { + // Spill LR with out args. + static_assert((kCoreCalleeSpillMask >> LR) == 1u); // Contains LR as the highest bit. + constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u; + static_assert(kCalleeSaveRegisters[lr_index].Equals( + ArmManagedRegister::FromCoreRegister(LR))); + return ArrayRef(kCalleeSaveRegisters).SubArray( + /*pos*/ lr_index, /*length=*/ 1u); + } + } else { + return ArrayRef(kCalleeSaveRegisters); + } +} + +// JniCallingConvention ABI follows AAPCS where longs and doubles must occur +// in even register numbers and stack slots +void ArmJniCallingConvention::Next() { + // Update the iterator by usual JNI rules. + JniCallingConvention::Next(); + + if (LIKELY(HasNext())) { // Avoid CHECK failure for IsCurrentParam + // Ensure slot is 8-byte aligned for longs/doubles (AAPCS). + if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) { + // itr_slots_ needs to be an even number, according to AAPCS. + itr_slots_++; + } + } +} + +bool ArmJniCallingConvention::IsCurrentParamInRegister() { + return itr_slots_ < kJniArgumentRegisterCount; +} + +bool ArmJniCallingConvention::IsCurrentParamOnStack() { + return !IsCurrentParamInRegister(); +} + +ManagedRegister ArmJniCallingConvention::CurrentParamRegister() { + CHECK_LT(itr_slots_, kJniArgumentRegisterCount); + if (IsCurrentParamALongOrDouble()) { + // AAPCS 5.1.1 requires 64-bit values to be in a consecutive register pair: + // "A double-word sized type is passed in two consecutive registers (e.g., r0 and r1, or r2 and + // r3). 
The content of the registers is as if the value had been loaded from memory + // representation with a single LDM instruction." + if (itr_slots_ == 0u) { + return ArmManagedRegister::FromRegisterPair(R0_R1); + } else if (itr_slots_ == 2u) { + return ArmManagedRegister::FromRegisterPair(R2_R3); + } else { + // The register can either be R0 (+R1) or R2 (+R3). Cannot be other values. + LOG(FATAL) << "Invalid iterator register position for a long/double " << itr_args_; + UNREACHABLE(); + } + } else { + // All other types can fit into one register. + return ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]); + } +} + +FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() { + CHECK_GE(itr_slots_, kJniArgumentRegisterCount); + size_t offset = + displacement_.Int32Value() + - OutArgSize() + + ((itr_slots_ - kJniArgumentRegisterCount) * kFramePointerSize); + CHECK_LT(offset, OutArgSize()); + return FrameOffset(offset); +} + +ManagedRegister ArmJniCallingConvention::HiddenArgumentRegister() const { + CHECK(IsCriticalNative()); + // R4 is neither managed callee-save, nor argument register, nor scratch register. + // (It is native callee-save but the value coming from managed code can be clobbered.) + // TODO: Change to static_assert; std::none_of should be constexpr since C++20. + DCHECK(std::none_of(kCalleeSaveRegisters, + kCalleeSaveRegisters + std::size(kCalleeSaveRegisters), + [](ManagedRegister callee_save) constexpr { + return callee_save.Equals(ArmManagedRegister::FromCoreRegister(R4)); + })); + DCHECK(std::none_of(kJniArgumentRegisters, + kJniArgumentRegisters + std::size(kJniArgumentRegisters), + [](Register reg) { return reg == R4; })); + DCHECK(!InterproceduralScratchRegister().Equals(ArmManagedRegister::FromCoreRegister(R4))); + return ArmManagedRegister::FromCoreRegister(R4); +} + +// Whether to use tail call (used only for @CriticalNative). 
+bool ArmJniCallingConvention::UseTailCall() const { + CHECK(IsCriticalNative()); + return OutArgSize() == 0u; +} + +} // namespace arm +} // namespace art diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h new file mode 100644 index 0000000..e4b86fa --- /dev/null +++ b/compiler/jni/quick/arm/calling_convention_arm.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_ +#define ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_ + +#include "base/enums.h" +#include "jni/quick/calling_convention.h" + +namespace art { +namespace arm { + +class ArmManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention { + public: + ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) + : ManagedRuntimeCallingConvention(is_static, + is_synchronized, + shorty, + PointerSize::k32) {} + ~ArmManagedRuntimeCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // Managed runtime calling convention + ManagedRegister MethodRegister() override; + bool IsCurrentParamInRegister() override; + bool IsCurrentParamOnStack() override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + const ManagedRegisterEntrySpills& EntrySpills() override; + + private: + ManagedRegisterEntrySpills entry_spills_; + + DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention); +}; + +class ArmJniCallingConvention final : public JniCallingConvention { + public: + ArmJniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty); + ~ArmJniCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister IntReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // JNI calling convention + void Next() override; // Override default behavior for AAPCS + size_t FrameSize() const override; + size_t OutArgSize() const override; + ArrayRef CalleeSaveRegisters() const override; + ManagedRegister ReturnScratchRegister() const override; + uint32_t CoreSpillMask() const override; + uint32_t FpSpillMask() const override; + bool IsCurrentParamInRegister() 
override; + bool IsCurrentParamOnStack() override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + + // AAPCS mandates return values are extended. + bool RequiresSmallResultTypeExtension() const override { + return false; + } + + // Hidden argument register, used to pass the method pointer for @CriticalNative call. + ManagedRegister HiddenArgumentRegister() const override; + + // Whether to use tail call (used only for @CriticalNative). + bool UseTailCall() const override; + + private: + // Padding to ensure longs and doubles are not split in AAPCS + size_t padding_; + + DISALLOW_COPY_AND_ASSIGN(ArmJniCallingConvention); +}; + +} // namespace arm +} // namespace art + +#endif // ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_ diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc new file mode 100644 index 0000000..231e140 --- /dev/null +++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc @@ -0,0 +1,425 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "calling_convention_arm64.h" + +#include + +#include "arch/arm64/jni_frame_arm64.h" +#include "arch/instruction_set.h" +#include "handle_scope-inl.h" +#include "utils/arm64/managed_register_arm64.h" + +namespace art { +namespace arm64 { + +static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size"); + +static const XRegister kXArgumentRegisters[] = { + X0, X1, X2, X3, X4, X5, X6, X7 +}; +static_assert(kMaxIntLikeRegisterArguments == arraysize(kXArgumentRegisters)); + +static const WRegister kWArgumentRegisters[] = { + W0, W1, W2, W3, W4, W5, W6, W7 +}; +static_assert(kMaxIntLikeRegisterArguments == arraysize(kWArgumentRegisters)); + +static const DRegister kDArgumentRegisters[] = { + D0, D1, D2, D3, D4, D5, D6, D7 +}; +static_assert(kMaxFloatOrDoubleRegisterArguments == arraysize(kDArgumentRegisters)); + +static const SRegister kSArgumentRegisters[] = { + S0, S1, S2, S3, S4, S5, S6, S7 +}; +static_assert(kMaxFloatOrDoubleRegisterArguments == arraysize(kSArgumentRegisters)); + +static constexpr ManagedRegister kCalleeSaveRegisters[] = { + // Core registers. + // Note: The native jni function may call to some VM runtime functions which may suspend + // or trigger GC. And the jni method frame will become top quick frame in those cases. + // So we need to satisfy GC to save LR and callee-save registers which is similar to + // CalleeSaveMethod(RefOnly) frame. + // Jni function is the native function which the java code wants to call. + // Jni method is the method that is compiled by jni compiler. + // Call chain: managed code(java) --> jni method --> jni function. + // This does not apply to the @CriticalNative. + + // Thread register(X19) is saved on stack. 
+ Arm64ManagedRegister::FromXRegister(X19), + Arm64ManagedRegister::FromXRegister(X20), + Arm64ManagedRegister::FromXRegister(X21), + Arm64ManagedRegister::FromXRegister(X22), + Arm64ManagedRegister::FromXRegister(X23), + Arm64ManagedRegister::FromXRegister(X24), + Arm64ManagedRegister::FromXRegister(X25), + Arm64ManagedRegister::FromXRegister(X26), + Arm64ManagedRegister::FromXRegister(X27), + Arm64ManagedRegister::FromXRegister(X28), + Arm64ManagedRegister::FromXRegister(X29), + Arm64ManagedRegister::FromXRegister(LR), + // Hard float registers. + // Considering the case, java_method_1 --> jni method --> jni function --> java_method_2, + // we may break on java_method_2 and we still need to find out the values of DEX registers + // in java_method_1. So all callee-saves(in managed code) need to be saved. + Arm64ManagedRegister::FromDRegister(D8), + Arm64ManagedRegister::FromDRegister(D9), + Arm64ManagedRegister::FromDRegister(D10), + Arm64ManagedRegister::FromDRegister(D11), + Arm64ManagedRegister::FromDRegister(D12), + Arm64ManagedRegister::FromDRegister(D13), + Arm64ManagedRegister::FromDRegister(D14), + Arm64ManagedRegister::FromDRegister(D15), +}; + +template +static constexpr uint32_t CalculateCoreCalleeSpillMask( + const ManagedRegister (&callee_saves)[size]) { + uint32_t result = 0u; + for (auto&& r : callee_saves) { + if (r.AsArm64().IsXRegister()) { + result |= (1u << r.AsArm64().AsXRegister()); + } + } + return result; +} + +template +static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) { + uint32_t result = 0u; + for (auto&& r : callee_saves) { + if (r.AsArm64().IsDRegister()) { + result |= (1u << r.AsArm64().AsDRegister()); + } + } + return result; +} + +static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters); +static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters); + +static constexpr ManagedRegister 
kAapcs64CalleeSaveRegisters[] = { + // Core registers. + Arm64ManagedRegister::FromXRegister(X19), + Arm64ManagedRegister::FromXRegister(X20), + Arm64ManagedRegister::FromXRegister(X21), + Arm64ManagedRegister::FromXRegister(X22), + Arm64ManagedRegister::FromXRegister(X23), + Arm64ManagedRegister::FromXRegister(X24), + Arm64ManagedRegister::FromXRegister(X25), + Arm64ManagedRegister::FromXRegister(X26), + Arm64ManagedRegister::FromXRegister(X27), + Arm64ManagedRegister::FromXRegister(X28), + Arm64ManagedRegister::FromXRegister(X29), + Arm64ManagedRegister::FromXRegister(LR), + // Hard float registers. + Arm64ManagedRegister::FromDRegister(D8), + Arm64ManagedRegister::FromDRegister(D9), + Arm64ManagedRegister::FromDRegister(D10), + Arm64ManagedRegister::FromDRegister(D11), + Arm64ManagedRegister::FromDRegister(D12), + Arm64ManagedRegister::FromDRegister(D13), + Arm64ManagedRegister::FromDRegister(D14), + Arm64ManagedRegister::FromDRegister(D15), +}; + +static constexpr uint32_t kAapcs64CoreCalleeSpillMask = + CalculateCoreCalleeSpillMask(kAapcs64CalleeSaveRegisters); +static constexpr uint32_t kAapcs64FpCalleeSpillMask = + CalculateFpCalleeSpillMask(kAapcs64CalleeSaveRegisters); + +// Calling convention +ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const { + return Arm64ManagedRegister::FromXRegister(IP0); // X16 +} + +ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() const { + return Arm64ManagedRegister::FromXRegister(IP0); // X16 +} + +static ManagedRegister ReturnRegisterForShorty(const char* shorty) { + if (shorty[0] == 'F') { + return Arm64ManagedRegister::FromSRegister(S0); + } else if (shorty[0] == 'D') { + return Arm64ManagedRegister::FromDRegister(D0); + } else if (shorty[0] == 'J') { + return Arm64ManagedRegister::FromXRegister(X0); + } else if (shorty[0] == 'V') { + return Arm64ManagedRegister::NoRegister(); + } else { + return Arm64ManagedRegister::FromWRegister(W0); + } +} + 
+ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty()); +} + +ManagedRegister Arm64JniCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty()); +} + +ManagedRegister Arm64JniCallingConvention::IntReturnRegister() { + return Arm64ManagedRegister::FromWRegister(W0); +} + +// Managed runtime calling convention + +ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() { + return Arm64ManagedRegister::FromXRegister(X0); +} + +bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() { + return false; // Everything moved to stack on entry. +} + +bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() { + return true; +} + +ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() { + LOG(FATAL) << "Should not reach here"; + UNREACHABLE(); +} + +FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + return FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method ref + (itr_slots_ * sizeof(uint32_t))); // offset into in args +} + +const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on ARM64 to free them up for scratch use, we then assume + // all arguments are on the stack. + if ((entry_spills_.size() == 0) && (NumArgs() > 0)) { + int gp_reg_index = 1; // we start from X1/W1, X0 holds ArtMethod*. + int fp_reg_index = 0; // D0/S0. + + // We need to choose the correct register (D/S or X/W) since the managed + // stack uses 32bit stack slots. + ResetIterator(FrameOffset(0)); + while (HasNext()) { + if (IsCurrentParamAFloatOrDouble()) { // FP regs. 
+ if (fp_reg_index < 8) { + if (!IsCurrentParamADouble()) { + entry_spills_.push_back(Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index])); + } else { + entry_spills_.push_back(Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index])); + } + fp_reg_index++; + } else { // just increase the stack offset. + if (!IsCurrentParamADouble()) { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 8); + } + } + } else { // GP regs. + if (gp_reg_index < 8) { + if (IsCurrentParamALong() && (!IsCurrentParamAReference())) { + entry_spills_.push_back(Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index])); + } else { + entry_spills_.push_back(Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index])); + } + gp_reg_index++; + } else { // just increase the stack offset. + if (IsCurrentParamALong() && (!IsCurrentParamAReference())) { + entry_spills_.push_back(ManagedRegister::NoRegister(), 8); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } + } + } + Next(); + } + } + return entry_spills_; +} + +// JNI calling convention + +Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty) + : JniCallingConvention(is_static, + is_synchronized, + is_critical_native, + shorty, + kArm64PointerSize) { +} + +uint32_t Arm64JniCallingConvention::CoreSpillMask() const { + return is_critical_native_ ? 0u : kCoreCalleeSpillMask; +} + +uint32_t Arm64JniCallingConvention::FpSpillMask() const { + return is_critical_native_ ? 
0u : kFpCalleeSpillMask; +} + +ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const { + return ManagedRegister::NoRegister(); +} + +size_t Arm64JniCallingConvention::FrameSize() const { + if (is_critical_native_) { + CHECK(!SpillsMethod()); + CHECK(!HasLocalReferenceSegmentState()); + CHECK(!HasHandleScope()); + CHECK(!SpillsReturnValue()); + return 0u; // There is no managed frame for @CriticalNative. + } + + // Method*, callee save area size, local reference segment state + CHECK(SpillsMethod()); + size_t method_ptr_size = static_cast(kFramePointerSize); + size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize; + size_t total_size = method_ptr_size + callee_save_area_size; + + CHECK(HasLocalReferenceSegmentState()); + total_size += sizeof(uint32_t); + + CHECK(HasHandleScope()); + total_size += HandleScope::SizeOf(kArm64PointerSize, ReferenceCount()); + + // Plus return value spill area size + CHECK(SpillsReturnValue()); + total_size += SizeOfReturnValue(); + + return RoundUp(total_size, kStackAlignment); +} + +size_t Arm64JniCallingConvention::OutArgSize() const { + // Count param args, including JNIEnv* and jclass*. + size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs(); + size_t num_fp_args = NumFloatOrDoubleArgs(); + DCHECK_GE(all_args, num_fp_args); + size_t num_non_fp_args = all_args - num_fp_args; + // Account for FP arguments passed through v0-v7. + size_t num_stack_fp_args = + num_fp_args - std::min(kMaxFloatOrDoubleRegisterArguments, num_fp_args); + // Account for other (integer and pointer) arguments passed through GPR (x0-x7). + size_t num_stack_non_fp_args = + num_non_fp_args - std::min(kMaxIntLikeRegisterArguments, num_non_fp_args); + // The size of outgoing arguments. + size_t size = (num_stack_fp_args + num_stack_non_fp_args) * kFramePointerSize; + + // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS64. 
+ static_assert((kCoreCalleeSpillMask & ~kAapcs64CoreCalleeSpillMask) == 0u); + static_assert((kFpCalleeSpillMask & ~kAapcs64FpCalleeSpillMask) == 0u); + + // For @CriticalNative, we can make a tail call if there are no stack args and + // we do not need to extend the result. Otherwise, add space for return PC. + if (is_critical_native_ && (size != 0u || RequiresSmallResultTypeExtension())) { + size += kFramePointerSize; // We need to spill LR with the args. + } + size_t out_args_size = RoundUp(size, kAapcs64StackAlignment); + if (UNLIKELY(IsCriticalNative())) { + DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u)); + } + return out_args_size; +} + +ArrayRef Arm64JniCallingConvention::CalleeSaveRegisters() const { + if (UNLIKELY(IsCriticalNative())) { + if (UseTailCall()) { + return ArrayRef(); // Do not spill anything. + } else { + // Spill LR with out args. + static_assert((kCoreCalleeSpillMask >> LR) == 1u); // Contains LR as the highest bit. + constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u; + static_assert(kCalleeSaveRegisters[lr_index].Equals( + Arm64ManagedRegister::FromXRegister(LR))); + return ArrayRef(kCalleeSaveRegisters).SubArray( + /*pos*/ lr_index, /*length=*/ 1u); + } + } else { + return ArrayRef(kCalleeSaveRegisters); + } +} + +bool Arm64JniCallingConvention::IsCurrentParamInRegister() { + if (IsCurrentParamAFloatOrDouble()) { + return (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments); + } else { + return ((itr_args_ - itr_float_and_doubles_) < kMaxIntLikeRegisterArguments); + } + // TODO: Can we just call CurrentParamRegister to figure this out? +} + +bool Arm64JniCallingConvention::IsCurrentParamOnStack() { + // Is this ever not the same for all the architectures? 
+ return !IsCurrentParamInRegister(); +} + +ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() { + CHECK(IsCurrentParamInRegister()); + if (IsCurrentParamAFloatOrDouble()) { + CHECK_LT(itr_float_and_doubles_, kMaxFloatOrDoubleRegisterArguments); + if (IsCurrentParamADouble()) { + return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]); + } else { + return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]); + } + } else { + int gp_reg = itr_args_ - itr_float_and_doubles_; + CHECK_LT(static_cast(gp_reg), kMaxIntLikeRegisterArguments); + if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) { + return Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg]); + } else { + return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]); + } + } +} + +FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + size_t args_on_stack = itr_args_ + - std::min(kMaxFloatOrDoubleRegisterArguments, + static_cast(itr_float_and_doubles_)) + - std::min(kMaxIntLikeRegisterArguments, + static_cast(itr_args_ - itr_float_and_doubles_)); + size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize); + CHECK_LT(offset, OutArgSize()); + return FrameOffset(offset); +} + +ManagedRegister Arm64JniCallingConvention::HiddenArgumentRegister() const { + CHECK(IsCriticalNative()); + // X15 is neither managed callee-save, nor argument register, nor scratch register. + // TODO: Change to static_assert; std::none_of should be constexpr since C++20. 
+ DCHECK(std::none_of(kCalleeSaveRegisters, + kCalleeSaveRegisters + std::size(kCalleeSaveRegisters), + [](ManagedRegister callee_save) constexpr { + return callee_save.Equals(Arm64ManagedRegister::FromXRegister(X15)); + })); + DCHECK(std::none_of(kXArgumentRegisters, + kXArgumentRegisters + std::size(kXArgumentRegisters), + [](XRegister reg) { return reg == X15; })); + DCHECK(!InterproceduralScratchRegister().Equals(Arm64ManagedRegister::FromXRegister(X15))); + return Arm64ManagedRegister::FromXRegister(X15); +} + +// Whether to use tail call (used only for @CriticalNative). +bool Arm64JniCallingConvention::UseTailCall() const { + CHECK(IsCriticalNative()); + return OutArgSize() == 0u; +} + +} // namespace arm64 +} // namespace art diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h new file mode 100644 index 0000000..64b29f1 --- /dev/null +++ b/compiler/jni/quick/arm64/calling_convention_arm64.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_ +#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_ + +#include "base/enums.h" +#include "jni/quick/calling_convention.h" + +namespace art { +namespace arm64 { + +class Arm64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention { + public: + Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) + : ManagedRuntimeCallingConvention(is_static, + is_synchronized, + shorty, + PointerSize::k64) {} + ~Arm64ManagedRuntimeCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // Managed runtime calling convention + ManagedRegister MethodRegister() override; + bool IsCurrentParamInRegister() override; + bool IsCurrentParamOnStack() override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + const ManagedRegisterEntrySpills& EntrySpills() override; + + private: + ManagedRegisterEntrySpills entry_spills_; + + DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention); +}; + +class Arm64JniCallingConvention final : public JniCallingConvention { + public: + Arm64JniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty); + ~Arm64JniCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister IntReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // JNI calling convention + size_t FrameSize() const override; + size_t OutArgSize() const override; + ArrayRef CalleeSaveRegisters() const override; + ManagedRegister ReturnScratchRegister() const override; + uint32_t CoreSpillMask() const override; + uint32_t FpSpillMask() const override; + bool IsCurrentParamInRegister() override; + bool IsCurrentParamOnStack() 
override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + + // aarch64 calling convention leaves upper bits undefined. + bool RequiresSmallResultTypeExtension() const override { + return HasSmallReturnType(); + } + + // Hidden argument register, used to pass the method pointer for @CriticalNative call. + ManagedRegister HiddenArgumentRegister() const override; + + // Whether to use tail call (used only for @CriticalNative). + bool UseTailCall() const override; + + private: + DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention); +}; + +} // namespace arm64 +} // namespace art + +#endif // ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_ diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc new file mode 100644 index 0000000..1943756 --- /dev/null +++ b/compiler/jni/quick/calling_convention.cc @@ -0,0 +1,367 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "calling_convention.h" + +#include + +#include "arch/instruction_set.h" + +#ifdef ART_ENABLE_CODEGEN_arm +#include "jni/quick/arm/calling_convention_arm.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 +#include "jni/quick/arm64/calling_convention_arm64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 +#include "jni/quick/x86/calling_convention_x86.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 +#include "jni/quick/x86_64/calling_convention_x86_64.h" +#endif + +namespace art { + +// Managed runtime calling convention + +std::unique_ptr ManagedRuntimeCallingConvention::Create( + ArenaAllocator* allocator, + bool is_static, + bool is_synchronized, + const char* shorty, + InstructionSet instruction_set) { + switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm + case InstructionSet::kArm: + case InstructionSet::kThumb2: + return std::unique_ptr( + new (allocator) arm::ArmManagedRuntimeCallingConvention( + is_static, is_synchronized, shorty)); +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 + case InstructionSet::kArm64: + return std::unique_ptr( + new (allocator) arm64::Arm64ManagedRuntimeCallingConvention( + is_static, is_synchronized, shorty)); +#endif +#ifdef ART_ENABLE_CODEGEN_x86 + case InstructionSet::kX86: + return std::unique_ptr( + new (allocator) x86::X86ManagedRuntimeCallingConvention( + is_static, is_synchronized, shorty)); +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 + case InstructionSet::kX86_64: + return std::unique_ptr( + new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention( + is_static, is_synchronized, shorty)); +#endif + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; + UNREACHABLE(); + } +} + +bool ManagedRuntimeCallingConvention::HasNext() { + return itr_args_ < NumArgs(); +} + +void ManagedRuntimeCallingConvention::Next() { + CHECK(HasNext()); + if (IsCurrentArgExplicit() && // don't query parameter type of implicit args + IsParamALongOrDouble(itr_args_)) { + itr_longs_and_doubles_++; + itr_slots_++; + } + if 
(IsParamAFloatOrDouble(itr_args_)) { + itr_float_and_doubles_++; + } + if (IsCurrentParamAReference()) { + itr_refs_++; + } + itr_args_++; + itr_slots_++; +} + +bool ManagedRuntimeCallingConvention::IsCurrentArgExplicit() { + // Static methods have no implicit arguments, others implicitly pass this + return IsStatic() || (itr_args_ != 0); +} + +bool ManagedRuntimeCallingConvention::IsCurrentArgPossiblyNull() { + return IsCurrentArgExplicit(); // any user parameter may be null +} + +size_t ManagedRuntimeCallingConvention::CurrentParamSize() { + return ParamSize(itr_args_); +} + +bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() { + return IsParamAReference(itr_args_); +} + +bool ManagedRuntimeCallingConvention::IsCurrentParamAFloatOrDouble() { + return IsParamAFloatOrDouble(itr_args_); +} + +bool ManagedRuntimeCallingConvention::IsCurrentParamADouble() { + return IsParamADouble(itr_args_); +} + +bool ManagedRuntimeCallingConvention::IsCurrentParamALong() { + return IsParamALong(itr_args_); +} + +// JNI calling convention + +std::unique_ptr JniCallingConvention::Create(ArenaAllocator* allocator, + bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty, + InstructionSet instruction_set) { + switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm + case InstructionSet::kArm: + case InstructionSet::kThumb2: + return std::unique_ptr( + new (allocator) arm::ArmJniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 + case InstructionSet::kArm64: + return std::unique_ptr( + new (allocator) arm64::Arm64JniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); +#endif +#ifdef ART_ENABLE_CODEGEN_x86 + case InstructionSet::kX86: + return std::unique_ptr( + new (allocator) x86::X86JniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 + case InstructionSet::kX86_64: + 
return std::unique_ptr( + new (allocator) x86_64::X86_64JniCallingConvention( + is_static, is_synchronized, is_critical_native, shorty)); +#endif + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; + UNREACHABLE(); + } +} + +size_t JniCallingConvention::ReferenceCount() const { + return NumReferenceArgs() + (IsStatic() ? 1 : 0); +} + +FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const { + size_t references_size = handle_scope_pointer_size_ * ReferenceCount(); // size excluding header + return FrameOffset(HandleReferencesOffset().Int32Value() + references_size); +} + +FrameOffset JniCallingConvention::ReturnValueSaveLocation() const { + if (LIKELY(HasHandleScope())) { + // Initial offset already includes the displacement. + // -- Remove the additional local reference cookie offset if we don't have a handle scope. + const size_t saved_local_reference_cookie_offset = + SavedLocalReferenceCookieOffset().Int32Value(); + // Segment state is 4 bytes long + const size_t segment_state_size = 4; + return FrameOffset(saved_local_reference_cookie_offset + segment_state_size); + } else { + // Include only the initial Method* as part of the offset. + CHECK_LT(displacement_.SizeValue(), + static_cast(std::numeric_limits::max())); + return FrameOffset(displacement_.Int32Value() + static_cast(frame_pointer_size_)); + } +} + +bool JniCallingConvention::HasNext() { + if (IsCurrentArgExtraForJni()) { + return true; + } else { + unsigned int arg_pos = GetIteratorPositionWithinShorty(); + return arg_pos < NumArgs(); + } +} + +void JniCallingConvention::Next() { + CHECK(HasNext()); + if (IsCurrentParamALong() || IsCurrentParamADouble()) { + itr_longs_and_doubles_++; + itr_slots_++; + } + if (IsCurrentParamAFloatOrDouble()) { + itr_float_and_doubles_++; + } + if (IsCurrentParamAReference()) { + itr_refs_++; + } + // This default/fallthrough case also covers the extra JNIEnv* argument, + // as well as any other single-slot primitives. 
+ itr_args_++; + itr_slots_++; +} + +bool JniCallingConvention::IsCurrentParamAReference() { + bool return_value; + if (SwitchExtraJniArguments(itr_args_, + false, // JNIEnv* + true, // jobject or jclass + /* out parameters */ + &return_value)) { + return return_value; + } else { + int arg_pos = GetIteratorPositionWithinShorty(); + return IsParamAReference(arg_pos); + } +} + + +bool JniCallingConvention::IsCurrentParamJniEnv() { + if (UNLIKELY(!HasJniEnv())) { + return false; + } + return (itr_args_ == kJniEnv); +} + +bool JniCallingConvention::IsCurrentParamAFloatOrDouble() { + bool return_value; + if (SwitchExtraJniArguments(itr_args_, + false, // jnienv* + false, // jobject or jclass + /* out parameters */ + &return_value)) { + return return_value; + } else { + int arg_pos = GetIteratorPositionWithinShorty(); + return IsParamAFloatOrDouble(arg_pos); + } +} + +bool JniCallingConvention::IsCurrentParamADouble() { + bool return_value; + if (SwitchExtraJniArguments(itr_args_, + false, // jnienv* + false, // jobject or jclass + /* out parameters */ + &return_value)) { + return return_value; + } else { + int arg_pos = GetIteratorPositionWithinShorty(); + return IsParamADouble(arg_pos); + } +} + +bool JniCallingConvention::IsCurrentParamALong() { + bool return_value; + if (SwitchExtraJniArguments(itr_args_, + false, // jnienv* + false, // jobject or jclass + /* out parameters */ + &return_value)) { + return return_value; + } else { + int arg_pos = GetIteratorPositionWithinShorty(); + return IsParamALong(arg_pos); + } +} + +// Return position of handle scope entry holding reference at the current iterator +// position +FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() { + CHECK(IsCurrentParamAReference()); + CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset()); + int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_; + CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value()); + return 
FrameOffset(result); +} + +size_t JniCallingConvention::CurrentParamSize() const { + if (IsCurrentArgExtraForJni()) { + return static_cast(frame_pointer_size_); // JNIEnv or jobject/jclass + } else { + int arg_pos = GetIteratorPositionWithinShorty(); + return ParamSize(arg_pos); + } +} + +size_t JniCallingConvention::NumberOfExtraArgumentsForJni() const { + if (LIKELY(HasExtraArgumentsForJni())) { + // The first argument is the JNIEnv*. + // Static methods have an extra argument which is the jclass. + return IsStatic() ? 2 : 1; + } else { + // Critical natives exclude the JNIEnv and the jclass/this parameters. + return 0; + } +} + +bool JniCallingConvention::HasSelfClass() const { + if (!IsStatic()) { + // Virtual functions: There is never an implicit jclass parameter. + return false; + } else { + // Static functions: There is an implicit jclass parameter unless it's @CriticalNative. + return HasExtraArgumentsForJni(); + } +} + +unsigned int JniCallingConvention::GetIteratorPositionWithinShorty() const { + // We need to subtract out the extra JNI arguments if we want to use this iterator position + // with the inherited CallingConvention member functions, which rely on scanning the shorty. + // Note that our shorty does *not* include the JNIEnv, jclass/jobject parameters. + DCHECK_GE(itr_args_, NumberOfExtraArgumentsForJni()); + return itr_args_ - NumberOfExtraArgumentsForJni(); +} + +bool JniCallingConvention::IsCurrentArgExtraForJni() const { + if (UNLIKELY(!HasExtraArgumentsForJni())) { + return false; // If there are no extra args, we can never be an extra. + } + // Only parameters kJniEnv and kObjectOrClass are considered extra. 
+ return itr_args_ <= kObjectOrClass; +} + +bool JniCallingConvention::SwitchExtraJniArguments(size_t switch_value, + bool case_jni_env, + bool case_object_or_class, + /* out parameters */ + bool* return_value) const { + DCHECK(return_value != nullptr); + if (UNLIKELY(!HasExtraArgumentsForJni())) { + return false; + } + + switch (switch_value) { + case kJniEnv: + *return_value = case_jni_env; + return true; + case kObjectOrClass: + *return_value = case_object_or_class; + return true; + default: + return false; + } +} + + +} // namespace art diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h new file mode 100644 index 0000000..3d4cefe --- /dev/null +++ b/compiler/jni/quick/calling_convention.h @@ -0,0 +1,478 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_ +#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_ + +#include "base/arena_object.h" +#include "base/array_ref.h" +#include "base/enums.h" +#include "dex/primitive.h" +#include "handle_scope.h" +#include "thread.h" +#include "utils/managed_register.h" + +namespace art { + +enum class InstructionSet; + +// Top-level abstraction for different calling conventions. 
+class CallingConvention : public DeletableArenaObject { + public: + bool IsReturnAReference() const { return shorty_[0] == 'L'; } + + Primitive::Type GetReturnType() const { + return Primitive::GetType(shorty_[0]); + } + + size_t SizeOfReturnValue() const { + size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[0])); + if (result >= 1 && result < 4) { + result = 4; + } + return result; + } + + // Register that holds result of this method invocation. + virtual ManagedRegister ReturnRegister() = 0; + // Register reserved for scratch usage during procedure calls. + virtual ManagedRegister InterproceduralScratchRegister() const = 0; + + // Iterator interface + + // Place iterator at start of arguments. The displacement is applied to + // frame offset methods to account for frames which may be on the stack + // below the one being iterated over. + void ResetIterator(FrameOffset displacement) { + displacement_ = displacement; + itr_slots_ = 0; + itr_args_ = 0; + itr_refs_ = 0; + itr_longs_and_doubles_ = 0; + itr_float_and_doubles_ = 0; + } + + FrameOffset GetDisplacement() const { + return displacement_; + } + + PointerSize GetFramePointerSize() const { + return frame_pointer_size_; + } + + virtual ~CallingConvention() {} + + protected: + CallingConvention(bool is_static, + bool is_synchronized, + const char* shorty, + PointerSize frame_pointer_size) + : itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0), + itr_float_and_doubles_(0), displacement_(0), + frame_pointer_size_(frame_pointer_size), + handle_scope_pointer_size_(sizeof(StackReference)), + is_static_(is_static), is_synchronized_(is_synchronized), + shorty_(shorty) { + num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1; + num_ref_args_ = is_static ? 0 : 1; // The implicit this pointer. 
+ num_float_or_double_args_ = 0; + num_long_or_double_args_ = 0; + for (size_t i = 1; i < strlen(shorty); i++) { + char ch = shorty_[i]; + switch (ch) { + case 'L': + num_ref_args_++; + break; + case 'J': + num_long_or_double_args_++; + break; + case 'D': + num_long_or_double_args_++; + num_float_or_double_args_++; + break; + case 'F': + num_float_or_double_args_++; + break; + } + } + } + + bool IsStatic() const { + return is_static_; + } + bool IsSynchronized() const { + return is_synchronized_; + } + bool IsParamALongOrDouble(unsigned int param) const { + DCHECK_LT(param, NumArgs()); + if (IsStatic()) { + param++; // 0th argument must skip return value at start of the shorty + } else if (param == 0) { + return false; // this argument + } + char ch = shorty_[param]; + return (ch == 'J' || ch == 'D'); + } + bool IsParamAFloatOrDouble(unsigned int param) const { + DCHECK_LT(param, NumArgs()); + if (IsStatic()) { + param++; // 0th argument must skip return value at start of the shorty + } else if (param == 0) { + return false; // this argument + } + char ch = shorty_[param]; + return (ch == 'F' || ch == 'D'); + } + bool IsParamADouble(unsigned int param) const { + DCHECK_LT(param, NumArgs()); + if (IsStatic()) { + param++; // 0th argument must skip return value at start of the shorty + } else if (param == 0) { + return false; // this argument + } + return shorty_[param] == 'D'; + } + bool IsParamALong(unsigned int param) const { + DCHECK_LT(param, NumArgs()); + if (IsStatic()) { + param++; // 0th argument must skip return value at start of the shorty + } else if (param == 0) { + return false; // this argument + } + return shorty_[param] == 'J'; + } + bool IsParamAReference(unsigned int param) const { + DCHECK_LT(param, NumArgs()); + if (IsStatic()) { + param++; // 0th argument must skip return value at start of the shorty + } else if (param == 0) { + return true; // this argument + } + return shorty_[param] == 'L'; + } + size_t NumArgs() const { + return num_args_; + 
} + // Implicit argument count: 1 for instance functions, 0 for static functions. + // (The implicit argument is only relevant to the shorty, i.e. + // the 0th arg is not in the shorty if it's implicit). + size_t NumImplicitArgs() const { + return IsStatic() ? 0 : 1; + } + size_t NumLongOrDoubleArgs() const { + return num_long_or_double_args_; + } + size_t NumFloatOrDoubleArgs() const { + return num_float_or_double_args_; + } + size_t NumReferenceArgs() const { + return num_ref_args_; + } + size_t ParamSize(unsigned int param) const { + DCHECK_LT(param, NumArgs()); + if (IsStatic()) { + param++; // 0th argument must skip return value at start of the shorty + } else if (param == 0) { + return sizeof(mirror::HeapReference); // this argument + } + size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param])); + if (result >= 1 && result < 4) { + result = 4; + } + return result; + } + const char* GetShorty() const { + return shorty_.c_str(); + } + // The slot number for current calling_convention argument. + // Note that each slot is 32-bit. When the current argument is bigger + // than 32 bits, return the first slot number for this argument. + unsigned int itr_slots_; + // The number of references iterated past. + unsigned int itr_refs_; + // The argument number along argument list for current argument. + unsigned int itr_args_; + // Number of longs and doubles seen along argument list. + unsigned int itr_longs_and_doubles_; + // Number of float and doubles seen along argument list. + unsigned int itr_float_and_doubles_; + // Space for frames below this on the stack. + FrameOffset displacement_; + // The size of a pointer. + const PointerSize frame_pointer_size_; + // The size of a reference entry within the handle scope. 
+ const size_t handle_scope_pointer_size_; + + private: + const bool is_static_; + const bool is_synchronized_; + std::string shorty_; + size_t num_args_; + size_t num_ref_args_; + size_t num_float_or_double_args_; + size_t num_long_or_double_args_; +}; + +// Abstraction for managed code's calling conventions +// | { Incoming stack args } | +// | { Prior Method* } | <-- Prior SP +// | { Return address } | +// | { Callee saves } | +// | { Spills ... } | +// | { Outgoing stack args } | +// | { Method* } | <-- SP +class ManagedRuntimeCallingConvention : public CallingConvention { + public: + static std::unique_ptr Create(ArenaAllocator* allocator, + bool is_static, + bool is_synchronized, + const char* shorty, + InstructionSet instruction_set); + + // Offset of Method within the managed frame. + FrameOffset MethodStackOffset() { + return FrameOffset(0u); + } + + // Register that holds the incoming method argument + virtual ManagedRegister MethodRegister() = 0; + + // Iterator interface + bool HasNext(); + void Next(); + bool IsCurrentParamAReference(); + bool IsCurrentParamAFloatOrDouble(); + bool IsCurrentParamADouble(); + bool IsCurrentParamALong(); + bool IsCurrentArgExplicit(); // ie a non-implict argument such as this + bool IsCurrentArgPossiblyNull(); + size_t CurrentParamSize(); + virtual bool IsCurrentParamInRegister() = 0; + virtual bool IsCurrentParamOnStack() = 0; + virtual ManagedRegister CurrentParamRegister() = 0; + virtual FrameOffset CurrentParamStackOffset() = 0; + + virtual ~ManagedRuntimeCallingConvention() {} + + // Registers to spill to caller's out registers on entry. 
+ virtual const ManagedRegisterEntrySpills& EntrySpills() = 0; + + protected: + ManagedRuntimeCallingConvention(bool is_static, + bool is_synchronized, + const char* shorty, + PointerSize frame_pointer_size) + : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {} +}; + +// Abstraction for JNI calling conventions +// | { Incoming stack args } | <-- Prior SP +// | { Return address } | +// | { Callee saves } | ([1]) +// | { Return value spill } | (live on return slow paths) +// | { Local Ref. Table State } | +// | { Stack Indirect Ref. Table | +// | num. refs./link } | (here to prior SP is frame size) +// | { Method* } | <-- Anchor SP written to thread +// | { Outgoing stack args } | <-- SP at point of call +// | Native frame | +// +// [1] We must save all callee saves here to enable any exception throws to restore +// callee saves for frames above this one. +class JniCallingConvention : public CallingConvention { + public: + static std::unique_ptr Create(ArenaAllocator* allocator, + bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty, + InstructionSet instruction_set); + + // Size of frame excluding space for outgoing args (its assumed Method* is + // always at the bottom of a frame, but this doesn't work for outgoing + // native args). Includes alignment. + virtual size_t FrameSize() const = 0; + // Size of outgoing arguments (stack portion), including alignment. + // -- Arguments that are passed via registers are excluded from this size. 
+ virtual size_t OutArgSize() const = 0; + // Number of references in stack indirect reference table + size_t ReferenceCount() const; + // Location where the segment state of the local indirect reference table is saved + FrameOffset SavedLocalReferenceCookieOffset() const; + // Location where the return value of a call can be squirreled if another + // call is made following the native call + FrameOffset ReturnValueSaveLocation() const; + // Register that holds result if it is integer. + virtual ManagedRegister IntReturnRegister() = 0; + // Whether the compiler needs to ensure zero-/sign-extension of a small result type + virtual bool RequiresSmallResultTypeExtension() const = 0; + + // Callee save registers to spill prior to native code (which may clobber) + virtual ArrayRef CalleeSaveRegisters() const = 0; + + // Spill mask values + virtual uint32_t CoreSpillMask() const = 0; + virtual uint32_t FpSpillMask() const = 0; + + // An extra scratch register live after the call + virtual ManagedRegister ReturnScratchRegister() const = 0; + + // Iterator interface + bool HasNext(); + virtual void Next(); + bool IsCurrentParamAReference(); + bool IsCurrentParamAFloatOrDouble(); + bool IsCurrentParamADouble(); + bool IsCurrentParamALong(); + bool IsCurrentParamALongOrDouble() { + return IsCurrentParamALong() || IsCurrentParamADouble(); + } + bool IsCurrentParamJniEnv(); + size_t CurrentParamSize() const; + virtual bool IsCurrentParamInRegister() = 0; + virtual bool IsCurrentParamOnStack() = 0; + virtual ManagedRegister CurrentParamRegister() = 0; + virtual FrameOffset CurrentParamStackOffset() = 0; + + // Iterator interface extension for JNI + FrameOffset CurrentParamHandleScopeEntryOffset(); + + // Position of handle scope and interior fields + FrameOffset HandleScopeOffset() const { + return FrameOffset(this->displacement_.Int32Value() + static_cast(frame_pointer_size_)); + // above Method reference + } + + FrameOffset HandleScopeLinkOffset() const { + return 
FrameOffset(HandleScopeOffset().Int32Value() + + HandleScope::LinkOffset(frame_pointer_size_)); + } + + FrameOffset HandleScopeNumRefsOffset() const { + return FrameOffset(HandleScopeOffset().Int32Value() + + HandleScope::NumberOfReferencesOffset(frame_pointer_size_)); + } + + FrameOffset HandleReferencesOffset() const { + return FrameOffset(HandleScopeOffset().Int32Value() + + HandleScope::ReferencesOffset(frame_pointer_size_)); + } + + virtual ~JniCallingConvention() {} + + bool IsCriticalNative() const { + return is_critical_native_; + } + + // Does the transition have a method pointer in the stack frame? + bool SpillsMethod() const { + // Exclude method pointer for @CriticalNative methods for optimization speed. + return !IsCriticalNative(); + } + + // Hidden argument register, used to pass the method pointer for @CriticalNative call. + virtual ManagedRegister HiddenArgumentRegister() const = 0; + + // Whether to use tail call (used only for @CriticalNative). + virtual bool UseTailCall() const = 0; + + // Whether the return type is small. Used for RequiresSmallResultTypeExtension() + // on architectures that require the sign/zero extension. + bool HasSmallReturnType() const { + Primitive::Type return_type = GetReturnType(); + return return_type == Primitive::kPrimByte || + return_type == Primitive::kPrimShort || + return_type == Primitive::kPrimBoolean || + return_type == Primitive::kPrimChar; + } + + protected: + // Named iterator positions + enum IteratorPos { + kJniEnv = 0, + kObjectOrClass = 1 + }; + + JniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty, + PointerSize frame_pointer_size) + : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size), + is_critical_native_(is_critical_native) {} + + protected: + size_t NumberOfExtraArgumentsForJni() const; + + // Does the transition have a StackHandleScope? 
+ bool HasHandleScope() const { + // Exclude HandleScope for @CriticalNative methods for optimization speed. + return !IsCriticalNative(); + } + + // Does the transition have a local reference segment state? + bool HasLocalReferenceSegmentState() const { + // Exclude local reference segment states for @CriticalNative methods for optimization speed. + return !IsCriticalNative(); + } + + // Does the transition back spill the return value in the stack frame? + bool SpillsReturnValue() const { + // Exclude return value for @CriticalNative methods for optimization speed. + return !IsCriticalNative(); + } + + // Are there extra JNI arguments (JNIEnv* and maybe jclass)? + bool HasExtraArgumentsForJni() const { + // @CriticalNative jni implementations exclude both JNIEnv* and the jclass/jobject parameters. + return !IsCriticalNative(); + } + + // Has a JNIEnv* parameter implicitly? + bool HasJniEnv() const { + // Exclude "JNIEnv*" parameter for @CriticalNative methods. + return HasExtraArgumentsForJni(); + } + + // Has a 'jclass' parameter implicitly? + bool HasSelfClass() const; + + // Returns the position of itr_args_, fixed up by removing the offset of extra JNI arguments. + unsigned int GetIteratorPositionWithinShorty() const; + + // Is the current argument (at the iterator) an extra argument for JNI? + bool IsCurrentArgExtraForJni() const; + + const bool is_critical_native_; + + private: + // Shorthand for switching on the switch value but only IF there are extra JNI arguments. + // + // Puts the case value into return_value. + // * (switch_value == kJniEnv) => case_jni_env + // * (switch_value == kObjectOrClass) => case_object_or_class + // + // Returns false otherwise (or if there are no extra JNI arguments). 
+ bool SwitchExtraJniArguments(size_t switch_value, + bool case_jni_env, + bool case_object_or_class, + /* out parameters */ + bool* return_value) const; +}; + +} // namespace art + +#endif // ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_ diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc new file mode 100644 index 0000000..c2db73a --- /dev/null +++ b/compiler/jni/quick/jni_compiler.cc @@ -0,0 +1,800 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jni_compiler.h" + +#include +#include +#include +#include +#include + +#include "art_method.h" +#include "base/arena_allocator.h" +#include "base/enums.h" +#include "base/logging.h" // For VLOG. 
+#include "base/macros.h" +#include "base/malloc_arena_pool.h" +#include "base/memory_region.h" +#include "base/utils.h" +#include "calling_convention.h" +#include "class_linker.h" +#include "dwarf/debug_frame_opcode_writer.h" +#include "dex/dex_file-inl.h" +#include "driver/compiler_options.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "jni/jni_env_ext.h" +#include "thread.h" +#include "utils/arm/managed_register_arm.h" +#include "utils/arm64/managed_register_arm64.h" +#include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" +#include "utils/managed_register.h" +#include "utils/x86/managed_register_x86.h" + +#define __ jni_asm-> + +namespace art { + +template +static void CopyParameter(JNIMacroAssembler* jni_asm, + ManagedRuntimeCallingConvention* mr_conv, + JniCallingConvention* jni_conv); +template +static void SetNativeParameter(JNIMacroAssembler* jni_asm, + JniCallingConvention* jni_conv, + ManagedRegister in_reg); + +template +static std::unique_ptr> GetMacroAssembler( + ArenaAllocator* allocator, InstructionSet isa, const InstructionSetFeatures* features) { + return JNIMacroAssembler::Create(allocator, isa, features); +} + +enum class JniEntrypoint { + kStart, + kEnd +}; + +template +static ThreadOffset GetJniEntrypointThreadOffset(JniEntrypoint which, + bool reference_return, + bool is_synchronized, + bool is_fast_native) { + if (which == JniEntrypoint::kStart) { // JniMethodStart + ThreadOffset jni_start = + is_synchronized + ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStartSynchronized) + : (is_fast_native + ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastStart) + : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStart)); + + return jni_start; + } else { // JniMethodEnd + ThreadOffset jni_end(-1); + if (reference_return) { + // Pass result. + jni_end = is_synchronized + ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReferenceSynchronized) + : (is_fast_native + ? 
QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastEndWithReference) + : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReference)); + } else { + jni_end = is_synchronized + ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndSynchronized) + : (is_fast_native + ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastEnd) + : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEnd)); + } + + return jni_end; + } +} + + +// Generate the JNI bridge for the given method, general contract: +// - Arguments are in the managed runtime format, either on stack or in +// registers, a reference to the method object is supplied as part of this +// convention. +// +template +static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& compiler_options, + uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file) { + const bool is_native = (access_flags & kAccNative) != 0; + CHECK(is_native); + const bool is_static = (access_flags & kAccStatic) != 0; + const bool is_synchronized = (access_flags & kAccSynchronized) != 0; + const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx)); + InstructionSet instruction_set = compiler_options.GetInstructionSet(); + const InstructionSetFeatures* instruction_set_features = + compiler_options.GetInstructionSetFeatures(); + + // i.e. if the method was annotated with @FastNative + const bool is_fast_native = (access_flags & kAccFastNative) != 0u; + + // i.e. 
if the method was annotated with @CriticalNative + const bool is_critical_native = (access_flags & kAccCriticalNative) != 0u; + + VLOG(jni) << "JniCompile: Method :: " + << dex_file.PrettyMethod(method_idx, /* with signature */ true) + << " :: access_flags = " << std::hex << access_flags << std::dec; + + if (UNLIKELY(is_fast_native)) { + VLOG(jni) << "JniCompile: Fast native method detected :: " + << dex_file.PrettyMethod(method_idx, /* with signature */ true); + } + + if (UNLIKELY(is_critical_native)) { + VLOG(jni) << "JniCompile: Critical native method detected :: " + << dex_file.PrettyMethod(method_idx, /* with signature */ true); + } + + if (kIsDebugBuild) { + // Don't allow both @FastNative and @CriticalNative. They are mutually exclusive. + if (UNLIKELY(is_fast_native && is_critical_native)) { + LOG(FATAL) << "JniCompile: Method cannot be both @CriticalNative and @FastNative" + << dex_file.PrettyMethod(method_idx, /* with_signature= */ true); + } + + // @CriticalNative - extra checks: + // -- Don't allow virtual criticals + // -- Don't allow synchronized criticals + // -- Don't allow any objects as parameter or return value + if (UNLIKELY(is_critical_native)) { + CHECK(is_static) + << "@CriticalNative functions cannot be virtual since that would" + << "require passing a reference parameter (this), which is illegal " + << dex_file.PrettyMethod(method_idx, /* with_signature= */ true); + CHECK(!is_synchronized) + << "@CriticalNative functions cannot be synchronized since that would" + << "require passing a (class and/or this) reference parameter, which is illegal " + << dex_file.PrettyMethod(method_idx, /* with_signature= */ true); + for (size_t i = 0; i < strlen(shorty); ++i) { + CHECK_NE(Primitive::kPrimNot, Primitive::GetType(shorty[i])) + << "@CriticalNative methods' shorty types must not have illegal references " + << dex_file.PrettyMethod(method_idx, /* with_signature= */ true); + } + } + } + + MallocArenaPool pool; + ArenaAllocator allocator(&pool); + + 
// Calling conventions used to iterate over parameters to method + std::unique_ptr main_jni_conv = + JniCallingConvention::Create(&allocator, + is_static, + is_synchronized, + is_critical_native, + shorty, + instruction_set); + bool reference_return = main_jni_conv->IsReturnAReference(); + + std::unique_ptr mr_conv( + ManagedRuntimeCallingConvention::Create( + &allocator, is_static, is_synchronized, shorty, instruction_set)); + + // Calling conventions to call into JNI method "end" possibly passing a returned reference, the + // method and the current thread. + const char* jni_end_shorty; + if (reference_return && is_synchronized) { + jni_end_shorty = "ILL"; + } else if (reference_return) { + jni_end_shorty = "IL"; + } else if (is_synchronized) { + jni_end_shorty = "VL"; + } else { + jni_end_shorty = "V"; + } + + std::unique_ptr end_jni_conv( + JniCallingConvention::Create(&allocator, + is_static, + is_synchronized, + is_critical_native, + jni_end_shorty, + instruction_set)); + + // Assembler that holds generated instructions + std::unique_ptr> jni_asm = + GetMacroAssembler(&allocator, instruction_set, instruction_set_features); + jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo()); + jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode()); + + // 1. Build the frame saving all callee saves, Method*, and PC return address. + // For @CriticalNative, this includes space for out args, otherwise just the managed frame. + const size_t managed_frame_size = main_jni_conv->FrameSize(); + const size_t main_out_arg_size = main_jni_conv->OutArgSize(); + size_t current_frame_size = is_critical_native ? main_out_arg_size : managed_frame_size; + ManagedRegister method_register = + is_critical_native ? 
ManagedRegister::NoRegister() : mr_conv->MethodRegister(); + ArrayRef callee_save_regs = main_jni_conv->CalleeSaveRegisters(); + __ BuildFrame(current_frame_size, method_register, callee_save_regs, mr_conv->EntrySpills()); + DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast(current_frame_size)); + + if (LIKELY(!is_critical_native)) { + // NOTE: @CriticalNative methods don't have a HandleScope + // because they can't have any reference parameters or return values. + + // 2. Set up the HandleScope + mr_conv->ResetIterator(FrameOffset(current_frame_size)); + main_jni_conv->ResetIterator(FrameOffset(0)); + __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(), + main_jni_conv->ReferenceCount(), + mr_conv->InterproceduralScratchRegister()); + + __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(), + Thread::TopHandleScopeOffset(), + mr_conv->InterproceduralScratchRegister()); + __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset(), + main_jni_conv->HandleScopeOffset(), + mr_conv->InterproceduralScratchRegister()); + + // 3. Place incoming reference arguments into handle scope + main_jni_conv->Next(); // Skip JNIEnv* + // 3.5. Create Class argument for static methods out of passed method + if (is_static) { + FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); + // Check handle scope offset is within frame + CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size); + // Note this LoadRef() doesn't need heap unpoisoning since it's from the ArtMethod. + // Note this LoadRef() does not include read barrier. It will be handled below. 
+ // + // scratchRegister = *method[DeclaringClassOffset()]; + __ LoadRef(main_jni_conv->InterproceduralScratchRegister(), + mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false); + __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false); + // *handleScopeOffset = scratchRegister + __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister()); + main_jni_conv->Next(); // in handle scope so move to next argument + } + // Place every reference into the handle scope (ignore other parameters). + while (mr_conv->HasNext()) { + CHECK(main_jni_conv->HasNext()); + bool ref_param = main_jni_conv->IsCurrentParamAReference(); + CHECK(!ref_param || mr_conv->IsCurrentParamAReference()); + // References need placing in handle scope and the entry value passing + if (ref_param) { + // Compute handle scope entry, note null is placed in the handle scope but its boxed value + // must be null. + FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); + // Check handle scope offset is within frame and doesn't run into the saved segment state. + CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size); + CHECK_NE(handle_scope_offset.Uint32Value(), + main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value()); + bool input_in_reg = mr_conv->IsCurrentParamInRegister(); + bool input_on_stack = mr_conv->IsCurrentParamOnStack(); + CHECK(input_in_reg || input_on_stack); + + if (input_in_reg) { + ManagedRegister in_reg = mr_conv->CurrentParamRegister(); + __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull()); + __ StoreRef(handle_scope_offset, in_reg); + } else if (input_on_stack) { + FrameOffset in_off = mr_conv->CurrentParamStackOffset(); + __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull()); + __ CopyRef(handle_scope_offset, in_off, + mr_conv->InterproceduralScratchRegister()); + } + } + mr_conv->Next(); + main_jni_conv->Next(); + } + + // 4. Write out the end of the quick frames. 
+ __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset()); + + // NOTE: @CriticalNative does not need to store the stack pointer to the thread + // because garbage collections are disabled within the execution of a + // @CriticalNative method. + // (TODO: We could probably disable it for @FastNative too). + } // if (!is_critical_native) + + // 5. Move frame down to allow space for out going args. + size_t current_out_arg_size = main_out_arg_size; + if (UNLIKELY(is_critical_native)) { + DCHECK_EQ(main_out_arg_size, current_frame_size); + // Move the method pointer to the hidden argument register. + __ Move(main_jni_conv->HiddenArgumentRegister(), + mr_conv->MethodRegister(), + static_cast(main_jni_conv->GetFramePointerSize())); + } else { + __ IncreaseFrameSize(main_out_arg_size); + current_frame_size += main_out_arg_size; + } + + // Call the read barrier for the declaring class loaded from the method for a static call. + // Skip this for @CriticalNative because we didn't build a HandleScope to begin with. + // Note that we always have outgoing param space available for at least two params. + if (kUseReadBarrier && is_static && !is_critical_native) { + const bool kReadBarrierFastPath = true; // Always true after Mips codegen was removed. + std::unique_ptr skip_cold_path_label; + if (kReadBarrierFastPath) { + skip_cold_path_label = __ CreateLabel(); + // Fast path for supported targets. + // + // Check if gc_is_marking is set -- if it's not, we don't need + // a read barrier so skip it. + __ LoadFromThread(main_jni_conv->InterproceduralScratchRegister(), + Thread::IsGcMarkingOffset(), + Thread::IsGcMarkingSize()); + // Jump over the slow path if gc is marking is false. + __ Jump(skip_cold_path_label.get(), + JNIMacroUnaryCondition::kZero, + main_jni_conv->InterproceduralScratchRegister()); + } + + // Construct slow path for read barrier: + // + // Call into the runtime's ReadBarrierJni and have it fix up + // the object address if it was moved. 
+ + ThreadOffset read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize, + pReadBarrierJni); + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); + main_jni_conv->Next(); // Skip JNIEnv. + FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); + // Pass the handle for the class as the first argument. + if (main_jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = main_jni_conv->CurrentParamStackOffset(); + __ CreateHandleScopeEntry(out_off, class_handle_scope_offset, + mr_conv->InterproceduralScratchRegister(), + false); + } else { + ManagedRegister out_reg = main_jni_conv->CurrentParamRegister(); + __ CreateHandleScopeEntry(out_reg, class_handle_scope_offset, + ManagedRegister::NoRegister(), false); + } + main_jni_conv->Next(); + // Pass the current thread as the second argument and call. + if (main_jni_conv->IsCurrentParamInRegister()) { + __ GetCurrentThread(main_jni_conv->CurrentParamRegister()); + __ Call(main_jni_conv->CurrentParamRegister(), + Offset(read_barrier), + main_jni_conv->InterproceduralScratchRegister()); + } else { + __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(), + main_jni_conv->InterproceduralScratchRegister()); + __ CallFromThread(read_barrier, main_jni_conv->InterproceduralScratchRegister()); + } + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset. + + if (kReadBarrierFastPath) { + __ Bind(skip_cold_path_label.get()); + } + } + + // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable + // can occur. The result is the saved JNI local state that is restored by the exit call. We + // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer + // arguments. 
+ FrameOffset locked_object_handle_scope_offset(0xBEEFDEAD); + FrameOffset saved_cookie_offset( + FrameOffset(0xDEADBEEFu)); // @CriticalNative - use obviously bad value for debugging + if (LIKELY(!is_critical_native)) { + // Skip this for @CriticalNative methods. They do not call JniMethodStart. + ThreadOffset jni_start( + GetJniEntrypointThreadOffset(JniEntrypoint::kStart, + reference_return, + is_synchronized, + is_fast_native).SizeValue()); + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); + locked_object_handle_scope_offset = FrameOffset(0); + if (is_synchronized) { + // Pass object for locking. + main_jni_conv->Next(); // Skip JNIEnv. + locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); + if (main_jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = main_jni_conv->CurrentParamStackOffset(); + __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset, + mr_conv->InterproceduralScratchRegister(), false); + } else { + ManagedRegister out_reg = main_jni_conv->CurrentParamRegister(); + __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset, + ManagedRegister::NoRegister(), false); + } + main_jni_conv->Next(); + } + if (main_jni_conv->IsCurrentParamInRegister()) { + __ GetCurrentThread(main_jni_conv->CurrentParamRegister()); + __ Call(main_jni_conv->CurrentParamRegister(), + Offset(jni_start), + main_jni_conv->InterproceduralScratchRegister()); + } else { + __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(), + main_jni_conv->InterproceduralScratchRegister()); + __ CallFromThread(jni_start, main_jni_conv->InterproceduralScratchRegister()); + } + if (is_synchronized) { // Check for exceptions from monitor enter. + __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size); + } + + // Store into stack_frame[saved_cookie_offset] the return value of JniMethodStart. 
+ saved_cookie_offset = main_jni_conv->SavedLocalReferenceCookieOffset(); + __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4 /* sizeof cookie */); + } + + // 7. Iterate over arguments placing values from managed calling convention in + // to the convention required for a native call (shuffling). For references + // place an index/pointer to the reference after checking whether it is + // null (which must be encoded as null). + // Note: we do this prior to materializing the JNIEnv* and static's jclass to + // give as many free registers for the shuffle as possible. + mr_conv->ResetIterator(FrameOffset(current_frame_size)); + uint32_t args_count = 0; + while (mr_conv->HasNext()) { + args_count++; + mr_conv->Next(); + } + + // Do a backward pass over arguments, so that the generated code will be "mov + // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3." + // TODO: A reverse iterator to improve readability. + // TODO: This is currently useless as all archs spill args when building the frame. + // To avoid the full spilling, we would have to do one pass before the BuildFrame() + // to determine which arg registers are clobbered before they are needed. + // TODO: For @CriticalNative, do a forward pass because there are no JNIEnv* and jclass* args. + for (uint32_t i = 0; i < args_count; ++i) { + mr_conv->ResetIterator(FrameOffset(current_frame_size)); + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); + + // Skip the extra JNI parameters for now. + if (LIKELY(!is_critical_native)) { + main_jni_conv->Next(); // Skip JNIEnv*. + if (is_static) { + main_jni_conv->Next(); // Skip Class for now. + } + } + // Skip to the argument we're interested in. 
+ for (uint32_t j = 0; j < args_count - i - 1; ++j) { + mr_conv->Next(); + main_jni_conv->Next(); + } + CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get()); + } + if (is_static && !is_critical_native) { + // Create argument for Class + mr_conv->ResetIterator(FrameOffset(current_frame_size)); + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); + main_jni_conv->Next(); // Skip JNIEnv* + FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); + if (main_jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = main_jni_conv->CurrentParamStackOffset(); + __ CreateHandleScopeEntry(out_off, handle_scope_offset, + mr_conv->InterproceduralScratchRegister(), + false); + } else { + ManagedRegister out_reg = main_jni_conv->CurrentParamRegister(); + __ CreateHandleScopeEntry(out_reg, handle_scope_offset, + ManagedRegister::NoRegister(), false); + } + } + + // Set the iterator back to the incoming Method*. + main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); + if (LIKELY(!is_critical_native)) { + // 8. Create 1st argument, the JNI environment ptr. + // Register that will hold local indirect reference table + if (main_jni_conv->IsCurrentParamInRegister()) { + ManagedRegister jni_env = main_jni_conv->CurrentParamRegister(); + DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister())); + __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset()); + } else { + FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset(); + __ CopyRawPtrFromThread(jni_env, + Thread::JniEnvOffset(), + main_jni_conv->InterproceduralScratchRegister()); + } + } + + // 9. Plant call to native code associated with method. 
+ MemberOffset jni_entrypoint_offset = + ArtMethod::EntryPointFromJniOffset(InstructionSetPointerSize(instruction_set)); + if (UNLIKELY(is_critical_native)) { + if (main_jni_conv->UseTailCall()) { + __ Jump(main_jni_conv->HiddenArgumentRegister(), + jni_entrypoint_offset, + main_jni_conv->InterproceduralScratchRegister()); + } else { + __ Call(main_jni_conv->HiddenArgumentRegister(), + jni_entrypoint_offset, + main_jni_conv->InterproceduralScratchRegister()); + } + } else { + __ Call(FrameOffset(main_out_arg_size + mr_conv->MethodStackOffset().SizeValue()), + jni_entrypoint_offset, + main_jni_conv->InterproceduralScratchRegister()); + } + + // 10. Fix differences in result widths. + if (main_jni_conv->RequiresSmallResultTypeExtension()) { + DCHECK(main_jni_conv->HasSmallReturnType()); + CHECK(!is_critical_native || !main_jni_conv->UseTailCall()); + if (main_jni_conv->GetReturnType() == Primitive::kPrimByte || + main_jni_conv->GetReturnType() == Primitive::kPrimShort) { + __ SignExtend(main_jni_conv->ReturnRegister(), + Primitive::ComponentSize(main_jni_conv->GetReturnType())); + } else { + CHECK(main_jni_conv->GetReturnType() == Primitive::kPrimBoolean || + main_jni_conv->GetReturnType() == Primitive::kPrimChar); + __ ZeroExtend(main_jni_conv->ReturnRegister(), + Primitive::ComponentSize(main_jni_conv->GetReturnType())); + } + } + + // 11. Process return value + FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation(); + if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) { + if (LIKELY(!is_critical_native)) { + // For normal JNI, store the return value on the stack because the call to + // JniMethodEnd will clobber the return value. It will be restored in (13). 
+ CHECK_LT(return_save_location.Uint32Value(), current_frame_size); + __ Store(return_save_location, + main_jni_conv->ReturnRegister(), + main_jni_conv->SizeOfReturnValue()); + } else { + // For @CriticalNative only, + // move the JNI return register into the managed return register (if they don't match). + ManagedRegister jni_return_reg = main_jni_conv->ReturnRegister(); + ManagedRegister mr_return_reg = mr_conv->ReturnRegister(); + + // Check if the JNI return register matches the managed return register. + // If they differ, only then do we have to do anything about it. + // Otherwise the return value is already in the right place when we return. + if (!jni_return_reg.Equals(mr_return_reg)) { + CHECK(!main_jni_conv->UseTailCall()); + // This is typically only necessary on ARM32 due to native being softfloat + // while managed is hardfloat. + // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0. + __ Move(mr_return_reg, jni_return_reg, main_jni_conv->SizeOfReturnValue()); + } else if (jni_return_reg.IsNoRegister() && mr_return_reg.IsNoRegister()) { + // Sanity check: If the return value is passed on the stack for some reason, + // then make sure the size matches. + CHECK_EQ(main_jni_conv->SizeOfReturnValue(), mr_conv->SizeOfReturnValue()); + } + } + } + + if (LIKELY(!is_critical_native)) { + // Increase frame size for out args if needed by the end_jni_conv. 
+ const size_t end_out_arg_size = end_jni_conv->OutArgSize(); + if (end_out_arg_size > current_out_arg_size) { + size_t out_arg_size_diff = end_out_arg_size - current_out_arg_size; + current_out_arg_size = end_out_arg_size; + __ IncreaseFrameSize(out_arg_size_diff); + current_frame_size += out_arg_size_diff; + saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff); + locked_object_handle_scope_offset = + FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff); + return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff); + } + end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size)); + + // 12. Call JniMethodEnd + ThreadOffset<kPointerSize> jni_end( + GetJniEntrypointThreadOffset<kPointerSize>(JniEntrypoint::kEnd, + reference_return, + is_synchronized, + is_fast_native).SizeValue()); + if (reference_return) { + // Pass result. + SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister()); + end_jni_conv->Next(); + } + // Pass saved local reference state. + if (end_jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = end_jni_conv->CurrentParamStackOffset(); + __ Copy(out_off, saved_cookie_offset, end_jni_conv->InterproceduralScratchRegister(), 4); + } else { + ManagedRegister out_reg = end_jni_conv->CurrentParamRegister(); + __ Load(out_reg, saved_cookie_offset, 4); + } + end_jni_conv->Next(); + if (is_synchronized) { + // Pass object for unlocking.
+ if (end_jni_conv->IsCurrentParamOnStack()) { + FrameOffset out_off = end_jni_conv->CurrentParamStackOffset(); + __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset, + end_jni_conv->InterproceduralScratchRegister(), + false); + } else { + ManagedRegister out_reg = end_jni_conv->CurrentParamRegister(); + __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset, + ManagedRegister::NoRegister(), false); + } + end_jni_conv->Next(); + } + if (end_jni_conv->IsCurrentParamInRegister()) { + __ GetCurrentThread(end_jni_conv->CurrentParamRegister()); + __ Call(end_jni_conv->CurrentParamRegister(), + Offset(jni_end), + end_jni_conv->InterproceduralScratchRegister()); + } else { + __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(), + end_jni_conv->InterproceduralScratchRegister()); + __ CallFromThread(jni_end, end_jni_conv->InterproceduralScratchRegister()); + } + + // 13. Reload return value + if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) { + __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue()); + // NIT: If it's @CriticalNative then we actually only need to do this IF + // the calling convention's native return register doesn't match the managed convention's + // return register. + } + } // if (!is_critical_native) + + // 14. Move frame up now we're done with the out arg space. + // @CriticalNative remove out args together with the frame in RemoveFrame(). + if (LIKELY(!is_critical_native)) { + __ DecreaseFrameSize(current_out_arg_size); + current_frame_size -= current_out_arg_size; + } + + // 15. Process pending exceptions from JNI call or monitor exit. + // @CriticalNative methods do not need exception poll in the stub. + if (LIKELY(!is_critical_native)) { + __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust= */); + } + + // 16. Remove activation - need to restore callee save registers since the GC may have changed + // them. 
+ DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(current_frame_size)); + if (LIKELY(!is_critical_native) || !main_jni_conv->UseTailCall()) { + // We expect the compiled method to possibly be suspended during its + // execution, except in the case of a CriticalNative method. + bool may_suspend = !is_critical_native; + __ RemoveFrame(current_frame_size, callee_save_regs, may_suspend); + DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(current_frame_size)); + } + + // 17. Finalize code generation + __ FinalizeCode(); + size_t cs = __ CodeSize(); + std::vector<uint8_t> managed_code(cs); + MemoryRegion code(&managed_code[0], managed_code.size()); + __ FinalizeInstructions(code); + + return JniCompiledMethod(instruction_set, + std::move(managed_code), + managed_frame_size, + main_jni_conv->CoreSpillMask(), + main_jni_conv->FpSpillMask(), + ArrayRef<const uint8_t>(*jni_asm->cfi().data())); +} + +// Copy a single parameter from the managed to the JNI calling convention. +template <PointerSize kPointerSize> +static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm, + ManagedRuntimeCallingConvention* mr_conv, + JniCallingConvention* jni_conv) { + bool input_in_reg = mr_conv->IsCurrentParamInRegister(); + bool output_in_reg = jni_conv->IsCurrentParamInRegister(); + FrameOffset handle_scope_offset(0); + bool null_allowed = false; + bool ref_param = jni_conv->IsCurrentParamAReference(); + CHECK(!ref_param || mr_conv->IsCurrentParamAReference()); + // input may be in register, on stack or both - but not none! + CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack()); + if (output_in_reg) { // output shouldn't straddle registers and stack + CHECK(!jni_conv->IsCurrentParamOnStack()); + } else { + CHECK(jni_conv->IsCurrentParamOnStack()); + } + // References need placing in handle scope and the entry address passing. + if (ref_param) { + null_allowed = mr_conv->IsCurrentArgPossiblyNull(); + // Compute handle scope offset.
Note null is placed in the handle scope but the jobject + // passed to the native code must be null (not a pointer into the handle scope + // as with regular references). + handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset(); + // Check handle scope offset is within frame. + CHECK_LT(handle_scope_offset.Uint32Value(), mr_conv->GetDisplacement().Uint32Value()); + } + if (input_in_reg && output_in_reg) { + ManagedRegister in_reg = mr_conv->CurrentParamRegister(); + ManagedRegister out_reg = jni_conv->CurrentParamRegister(); + if (ref_param) { + __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed); + } else { + if (!mr_conv->IsCurrentParamOnStack()) { + // regular non-straddling move + __ Move(out_reg, in_reg, mr_conv->CurrentParamSize()); + } else { + UNIMPLEMENTED(FATAL); // we currently don't expect to see this case + } + } + } else if (!input_in_reg && !output_in_reg) { + FrameOffset out_off = jni_conv->CurrentParamStackOffset(); + if (ref_param) { + __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(), + null_allowed); + } else { + FrameOffset in_off = mr_conv->CurrentParamStackOffset(); + size_t param_size = mr_conv->CurrentParamSize(); + CHECK_EQ(param_size, jni_conv->CurrentParamSize()); + __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size); + } + } else if (!input_in_reg && output_in_reg) { + FrameOffset in_off = mr_conv->CurrentParamStackOffset(); + ManagedRegister out_reg = jni_conv->CurrentParamRegister(); + // Check that incoming stack arguments are above the current stack frame. 
+ CHECK_GT(in_off.Uint32Value(), mr_conv->GetDisplacement().Uint32Value()); + if (ref_param) { + __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), null_allowed); + } else { + size_t param_size = mr_conv->CurrentParamSize(); + CHECK_EQ(param_size, jni_conv->CurrentParamSize()); + __ Load(out_reg, in_off, param_size); + } + } else { + CHECK(input_in_reg && !output_in_reg); + ManagedRegister in_reg = mr_conv->CurrentParamRegister(); + FrameOffset out_off = jni_conv->CurrentParamStackOffset(); + // Check outgoing argument is within frame part dedicated to out args. + CHECK_LT(out_off.Uint32Value(), jni_conv->GetDisplacement().Uint32Value()); + if (ref_param) { + // TODO: recycle value in in_reg rather than reload from handle scope + __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(), + null_allowed); + } else { + size_t param_size = mr_conv->CurrentParamSize(); + CHECK_EQ(param_size, jni_conv->CurrentParamSize()); + if (!mr_conv->IsCurrentParamOnStack()) { + // regular non-straddling store + __ Store(out_off, in_reg, param_size); + } else { + // store where input straddles registers and stack + CHECK_EQ(param_size, 8u); + FrameOffset in_off = mr_conv->CurrentParamStackOffset(); + __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister()); + } + } + } +} + +template <PointerSize kPointerSize> +static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm, + JniCallingConvention* jni_conv, + ManagedRegister in_reg) { + if (jni_conv->IsCurrentParamOnStack()) { + FrameOffset dest = jni_conv->CurrentParamStackOffset(); + __ StoreRawPtr(dest, in_reg); + } else { + if (!jni_conv->CurrentParamRegister().Equals(in_reg)) { + __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize()); + } + } +} + +JniCompiledMethod ArtQuickJniCompileMethod(const CompilerOptions& compiler_options, + uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file) { + if
(Is64BitInstructionSet(compiler_options.GetInstructionSet())) { + return ArtJniCompileMethodInternal<PointerSize::k64>( + compiler_options, access_flags, method_idx, dex_file); + } else { + return ArtJniCompileMethodInternal<PointerSize::k32>( + compiler_options, access_flags, method_idx, dex_file); + } +} + +} // namespace art diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h new file mode 100644 index 0000000..313fcd3 --- /dev/null +++ b/compiler/jni/quick/jni_compiler.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_ +#define ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_ + +#include <vector> + +#include "arch/instruction_set.h" +#include "base/array_ref.h" + +namespace art { + +class ArtMethod; +class CompilerOptions; +class DexFile; + +class JniCompiledMethod { + public: + JniCompiledMethod(InstructionSet instruction_set, + std::vector<uint8_t>&& code, + uint32_t frame_size, + uint32_t core_spill_mask, + uint32_t fp_spill_mask, + ArrayRef<const uint8_t> cfi) + : instruction_set_(instruction_set), + code_(std::move(code)), + frame_size_(frame_size), + core_spill_mask_(core_spill_mask), + fp_spill_mask_(fp_spill_mask), + cfi_(cfi.begin(), cfi.end()) {} + + JniCompiledMethod(JniCompiledMethod&& other) = default; + ~JniCompiledMethod() = default; + + InstructionSet GetInstructionSet() const { return instruction_set_; } + ArrayRef<const uint8_t> GetCode() const { return ArrayRef<const uint8_t>(code_); } + uint32_t GetFrameSize() const { return frame_size_; } + uint32_t GetCoreSpillMask() const { return core_spill_mask_; } + uint32_t GetFpSpillMask() const { return fp_spill_mask_; } + ArrayRef<const uint8_t> GetCfi() const { return ArrayRef<const uint8_t>(cfi_); } + + private: + InstructionSet instruction_set_; + std::vector<uint8_t> code_; + uint32_t frame_size_; + uint32_t core_spill_mask_; + uint32_t fp_spill_mask_; + std::vector<uint8_t> cfi_; +}; + +JniCompiledMethod ArtQuickJniCompileMethod(const CompilerOptions& compiler_options, + uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file); + +} // namespace art + +#endif // ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_ diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc new file mode 100644 index 0000000..4e643ba --- /dev/null +++ b/compiler/jni/quick/x86/calling_convention_x86.cc @@ -0,0 +1,339 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "calling_convention_x86.h" + +#include <android-base/logging.h> + +#include "arch/instruction_set.h" +#include "arch/x86/jni_frame_x86.h" +#include "handle_scope-inl.h" +#include "utils/x86/managed_register_x86.h" + +namespace art { +namespace x86 { + +static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size"); + +static constexpr ManagedRegister kCalleeSaveRegisters[] = { + // Core registers. + X86ManagedRegister::FromCpuRegister(EBP), + X86ManagedRegister::FromCpuRegister(ESI), + X86ManagedRegister::FromCpuRegister(EDI), + // No hard float callee saves. +}; + +template <size_t size> +static constexpr uint32_t CalculateCoreCalleeSpillMask( + const ManagedRegister (&callee_saves)[size]) { + // The spilled PC gets a special marker. + uint32_t result = 1 << kNumberOfCpuRegisters; + for (auto&& r : callee_saves) { + if (r.AsX86().IsCpuRegister()) { + result |= (1 << r.AsX86().AsCpuRegister()); + } + } + return result; +} + +static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters); +static constexpr uint32_t kFpCalleeSpillMask = 0u; + +static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = { + // Core registers. + X86ManagedRegister::FromCpuRegister(EBX), + X86ManagedRegister::FromCpuRegister(EBP), + X86ManagedRegister::FromCpuRegister(ESI), + X86ManagedRegister::FromCpuRegister(EDI), + // No hard float callee saves.
+}; + +static constexpr uint32_t kNativeCoreCalleeSpillMask = + CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters); +static constexpr uint32_t kNativeFpCalleeSpillMask = 0u; + +// Calling convention + +ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const { + return X86ManagedRegister::FromCpuRegister(ECX); +} + +ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() const { + return X86ManagedRegister::FromCpuRegister(ECX); +} + +ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const { + return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop +} + +static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) { + if (shorty[0] == 'F' || shorty[0] == 'D') { + if (jni) { + return X86ManagedRegister::FromX87Register(ST0); + } else { + return X86ManagedRegister::FromXmmRegister(XMM0); + } + } else if (shorty[0] == 'J') { + return X86ManagedRegister::FromRegisterPair(EAX_EDX); + } else if (shorty[0] == 'V') { + return ManagedRegister::NoRegister(); + } else { + return X86ManagedRegister::FromCpuRegister(EAX); + } +} + +ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty(), false); +} + +ManagedRegister X86JniCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty(), true); +} + +ManagedRegister X86JniCallingConvention::IntReturnRegister() { + return X86ManagedRegister::FromCpuRegister(EAX); +} + +// Managed runtime calling convention + +ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() { + return X86ManagedRegister::FromCpuRegister(EAX); +} + +bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() { + return false; // Everything is passed by stack +} + +bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() { + // We assume all parameters are on stack, args coming via registers are spilled as entry_spills. 
+ return true; +} + +ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() { + ManagedRegister res = ManagedRegister::NoRegister(); + if (!IsCurrentParamAFloatOrDouble()) { + switch (gpr_arg_count_) { + case 0: + res = X86ManagedRegister::FromCpuRegister(ECX); + break; + case 1: + res = X86ManagedRegister::FromCpuRegister(EDX); + break; + case 2: + // Don't split a long between the last register and the stack. + if (IsCurrentParamALong()) { + return ManagedRegister::NoRegister(); + } + res = X86ManagedRegister::FromCpuRegister(EBX); + break; + } + } else if (itr_float_and_doubles_ < 4) { + // First four float parameters are passed via XMM0..XMM3 + res = X86ManagedRegister::FromXmmRegister( + static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_)); + } + return res; +} + +ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() { + ManagedRegister res = ManagedRegister::NoRegister(); + DCHECK(IsCurrentParamALong()); + switch (gpr_arg_count_) { + case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break; + case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break; + } + return res; +} + +FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() { + return FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method* + (itr_slots_ * kFramePointerSize)); // offset into in args +} + +const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on X86 to free them up for scratch use, we then assume + // all arguments are on the stack. + if (entry_spills_.size() == 0) { + ResetIterator(FrameOffset(0)); + while (HasNext()) { + ManagedRegister in_reg = CurrentParamRegister(); + bool is_long = IsCurrentParamALong(); + if (!in_reg.IsNoRegister()) { + int32_t size = IsParamADouble(itr_args_) ?
8 : 4; + int32_t spill_offset = CurrentParamStackOffset().Uint32Value(); + ManagedRegisterSpill spill(in_reg, size, spill_offset); + entry_spills_.push_back(spill); + if (is_long) { + // special case, as we need a second register here. + in_reg = CurrentParamHighLongRegister(); + DCHECK(!in_reg.IsNoRegister()); + // We have to spill the second half of the long. + ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4); + entry_spills_.push_back(spill2); + } + + // Keep track of the number of GPRs allocated. + if (!IsCurrentParamAFloatOrDouble()) { + if (is_long) { + // Long was allocated in 2 registers. + gpr_arg_count_ += 2; + } else { + gpr_arg_count_++; + } + } + } else if (is_long) { + // We need to skip the unused last register, which is empty. + // If we are already out of registers, this is harmless. + gpr_arg_count_ += 2; + } + Next(); + } + } + return entry_spills_; +} + +// JNI calling convention + +X86JniCallingConvention::X86JniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty) + : JniCallingConvention(is_static, + is_synchronized, + is_critical_native, + shorty, + kX86PointerSize) { +} + +uint32_t X86JniCallingConvention::CoreSpillMask() const { + return is_critical_native_ ? 0u : kCoreCalleeSpillMask; +} + +uint32_t X86JniCallingConvention::FpSpillMask() const { + return is_critical_native_ ? 0u : kFpCalleeSpillMask; +} + +size_t X86JniCallingConvention::FrameSize() const { + if (is_critical_native_) { + CHECK(!SpillsMethod()); + CHECK(!HasLocalReferenceSegmentState()); + CHECK(!HasHandleScope()); + CHECK(!SpillsReturnValue()); + return 0u; // There is no managed frame for @CriticalNative. 
+ } + + // Method*, PC return address and callee save area size, local reference segment state + CHECK(SpillsMethod()); + const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize); + const size_t pc_return_addr_size = kFramePointerSize; + const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize; + size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size; + + CHECK(HasLocalReferenceSegmentState()); + total_size += kFramePointerSize; + + CHECK(HasHandleScope()); + total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount()); + + // Plus return value spill area size + CHECK(SpillsReturnValue()); + total_size += SizeOfReturnValue(); + + return RoundUp(total_size, kStackAlignment); +} + +size_t X86JniCallingConvention::OutArgSize() const { + // Count param args, including JNIEnv* and jclass*; count 8-byte args twice. + size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs(); + // The size of outgoing arguments. + size_t size = all_args * kFramePointerSize; + + // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS. + static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u); + static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) == 0u); + + if (UNLIKELY(IsCriticalNative())) { + // Add return address size for @CriticalNative. + // For normal native the return PC is part of the managed stack frame instead of out args. + size += kFramePointerSize; + // For @CriticalNative, we can make a tail call if there are no stack args + // and the return type is not FP type (needs moving from ST0 to MMX0) and + // we do not need to extend the result.
+ bool return_type_ok = GetShorty()[0] == 'I' || GetShorty()[0] == 'J' || GetShorty()[0] == 'V'; + DCHECK_EQ( + return_type_ok, + GetShorty()[0] != 'F' && GetShorty()[0] != 'D' && !RequiresSmallResultTypeExtension()); + if (return_type_ok && size == kFramePointerSize) { + // Note: This is not aligned to kNativeStackAlignment but that's OK for tail call. + static_assert(kFramePointerSize < kNativeStackAlignment); + DCHECK_EQ(kFramePointerSize, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u)); + return kFramePointerSize; + } + } + + size_t out_args_size = RoundUp(size, kNativeStackAlignment); + if (UNLIKELY(IsCriticalNative())) { + DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u)); + } + return out_args_size; +} + +ArrayRef<const ManagedRegister> X86JniCallingConvention::CalleeSaveRegisters() const { + if (UNLIKELY(IsCriticalNative())) { + // Do not spill anything, whether tail call or not (return PC is already on the stack). + return ArrayRef<const ManagedRegister>(); + } else { + return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters); + } +} + +bool X86JniCallingConvention::IsCurrentParamInRegister() { + return false; // Everything is passed by stack. +} + +bool X86JniCallingConvention::IsCurrentParamOnStack() { + return true; // Everything is passed by stack. +} + +ManagedRegister X86JniCallingConvention::CurrentParamRegister() { + LOG(FATAL) << "Should not reach here"; + UNREACHABLE(); +} + +FrameOffset X86JniCallingConvention::CurrentParamStackOffset() { + return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize)); +} + +ManagedRegister X86JniCallingConvention::HiddenArgumentRegister() const { + CHECK(IsCriticalNative()); + // EAX is neither managed callee-save, nor argument register, nor scratch register.
+ DCHECK(std::none_of(kCalleeSaveRegisters, + kCalleeSaveRegisters + std::size(kCalleeSaveRegisters), + [](ManagedRegister callee_save) constexpr { + return callee_save.Equals(X86ManagedRegister::FromCpuRegister(EAX)); + })); + DCHECK(!InterproceduralScratchRegister().Equals(X86ManagedRegister::FromCpuRegister(EAX))); + return X86ManagedRegister::FromCpuRegister(EAX); +} + +bool X86JniCallingConvention::UseTailCall() const { + CHECK(IsCriticalNative()); + return OutArgSize() == kFramePointerSize; +} + +} // namespace x86 +} // namespace art diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h new file mode 100644 index 0000000..1273e8d --- /dev/null +++ b/compiler/jni/quick/x86/calling_convention_x86.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_ +#define ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_ + +#include "base/enums.h" +#include "jni/quick/calling_convention.h" + +namespace art { +namespace x86 { + +class X86ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention { + public: + X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) + : ManagedRuntimeCallingConvention(is_static, + is_synchronized, + shorty, + PointerSize::k32), + gpr_arg_count_(0) {} + ~X86ManagedRuntimeCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // Managed runtime calling convention + ManagedRegister MethodRegister() override; + bool IsCurrentParamInRegister() override; + bool IsCurrentParamOnStack() override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + const ManagedRegisterEntrySpills& EntrySpills() override; + + private: + int gpr_arg_count_; + ManagedRegister CurrentParamHighLongRegister(); + ManagedRegisterEntrySpills entry_spills_; + DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention); +}; + +// Implements the x86 cdecl calling convention. 
+class X86JniCallingConvention final : public JniCallingConvention { + public: + X86JniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty); + ~X86JniCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister IntReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // JNI calling convention + size_t FrameSize() const override; + size_t OutArgSize() const override; + ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override; + ManagedRegister ReturnScratchRegister() const override; + uint32_t CoreSpillMask() const override; + uint32_t FpSpillMask() const override; + bool IsCurrentParamInRegister() override; + bool IsCurrentParamOnStack() override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + + // x86 needs to extend small return types. + bool RequiresSmallResultTypeExtension() const override { + return HasSmallReturnType(); + } + + // Hidden argument register, used to pass the method pointer for @CriticalNative call. + ManagedRegister HiddenArgumentRegister() const override; + + // Whether to use tail call (used only for @CriticalNative). + bool UseTailCall() const override; + + private: + DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention); +}; + +} // namespace x86 +} // namespace art + +#endif // ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_ diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc new file mode 100644 index 0000000..9013b02 --- /dev/null +++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc @@ -0,0 +1,350 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "calling_convention_x86_64.h" + +#include <android-base/logging.h> + +#include "arch/instruction_set.h" +#include "arch/x86_64/jni_frame_x86_64.h" +#include "base/bit_utils.h" +#include "handle_scope-inl.h" +#include "utils/x86_64/managed_register_x86_64.h" + +namespace art { +namespace x86_64 { + +static constexpr ManagedRegister kCalleeSaveRegisters[] = { + // Core registers. + X86_64ManagedRegister::FromCpuRegister(RBX), + X86_64ManagedRegister::FromCpuRegister(RBP), + X86_64ManagedRegister::FromCpuRegister(R12), + X86_64ManagedRegister::FromCpuRegister(R13), + X86_64ManagedRegister::FromCpuRegister(R14), + X86_64ManagedRegister::FromCpuRegister(R15), + // Hard float registers. + X86_64ManagedRegister::FromXmmRegister(XMM12), + X86_64ManagedRegister::FromXmmRegister(XMM13), + X86_64ManagedRegister::FromXmmRegister(XMM14), + X86_64ManagedRegister::FromXmmRegister(XMM15), +}; + +template <size_t size> +static constexpr uint32_t CalculateCoreCalleeSpillMask( + const ManagedRegister (&callee_saves)[size]) { + // The spilled PC gets a special marker.
+ uint32_t result = 1u << kNumberOfCpuRegisters; + for (auto&& r : callee_saves) { + if (r.AsX86_64().IsCpuRegister()) { + result |= (1u << r.AsX86_64().AsCpuRegister().AsRegister()); + } + } + return result; +} + +template <size_t size> +static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) { + uint32_t result = 0u; + for (auto&& r : callee_saves) { + if (r.AsX86_64().IsXmmRegister()) { + result |= (1u << r.AsX86_64().AsXmmRegister().AsFloatRegister()); + } + } + return result; +} + +static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters); +static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters); + +static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = { + // Core registers. + X86_64ManagedRegister::FromCpuRegister(RBX), + X86_64ManagedRegister::FromCpuRegister(RBP), + X86_64ManagedRegister::FromCpuRegister(R12), + X86_64ManagedRegister::FromCpuRegister(R13), + X86_64ManagedRegister::FromCpuRegister(R14), + X86_64ManagedRegister::FromCpuRegister(R15), + // No callee-save float registers.
+}; + +static constexpr uint32_t kNativeCoreCalleeSpillMask = + CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters); +static constexpr uint32_t kNativeFpCalleeSpillMask = + CalculateFpCalleeSpillMask(kNativeCalleeSaveRegisters); + +// Calling convention + +ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const { + return X86_64ManagedRegister::FromCpuRegister(RAX); +} + +ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() const { + return X86_64ManagedRegister::FromCpuRegister(RAX); +} + +ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const { + return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop +} + +static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) { + if (shorty[0] == 'F' || shorty[0] == 'D') { + return X86_64ManagedRegister::FromXmmRegister(XMM0); + } else if (shorty[0] == 'J') { + return X86_64ManagedRegister::FromCpuRegister(RAX); + } else if (shorty[0] == 'V') { + return ManagedRegister::NoRegister(); + } else { + return X86_64ManagedRegister::FromCpuRegister(RAX); + } +} + +ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty(), false); +} + +ManagedRegister X86_64JniCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty(), true); +} + +ManagedRegister X86_64JniCallingConvention::IntReturnRegister() { + return X86_64ManagedRegister::FromCpuRegister(RAX); +} + +// Managed runtime calling convention + +ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() { + return X86_64ManagedRegister::FromCpuRegister(RDI); +} + +bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() { + return !IsCurrentParamOnStack(); +} + +bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() { + // We assume all parameters are on stack, args coming via registers are spilled as entry_spills 
+ return true; +} + +ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() { + ManagedRegister res = ManagedRegister::NoRegister(); + if (!IsCurrentParamAFloatOrDouble()) { + switch (itr_args_ - itr_float_and_doubles_) { + case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break; + case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break; + case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break; + case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break; + case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break; + } + } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) { + // First eight float parameters are passed via XMM0..XMM7 + res = X86_64ManagedRegister::FromXmmRegister( + static_cast(XMM0 + itr_float_and_doubles_)); + } + return res; +} + +FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + return FrameOffset(displacement_.Int32Value() + // displacement + static_cast(kX86_64PointerSize) + // Method ref + itr_slots_ * sizeof(uint32_t)); // offset into in args +} + +const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on X86 to free them up for scratch use, we then assume + // all arguments are on the stack. + if (entry_spills_.size() == 0) { + ResetIterator(FrameOffset(0)); + while (HasNext()) { + ManagedRegister in_reg = CurrentParamRegister(); + if (!in_reg.IsNoRegister()) { + int32_t size = IsParamALongOrDouble(itr_args_) ? 
8 : 4; + int32_t spill_offset = CurrentParamStackOffset().Uint32Value(); + ManagedRegisterSpill spill(in_reg, size, spill_offset); + entry_spills_.push_back(spill); + } + Next(); + } + } + return entry_spills_; +} + +// JNI calling convention + +X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty) + : JniCallingConvention(is_static, + is_synchronized, + is_critical_native, + shorty, + kX86_64PointerSize) { +} + +uint32_t X86_64JniCallingConvention::CoreSpillMask() const { + return is_critical_native_ ? 0u : kCoreCalleeSpillMask; +} + +uint32_t X86_64JniCallingConvention::FpSpillMask() const { + return is_critical_native_ ? 0u : kFpCalleeSpillMask; +} + +size_t X86_64JniCallingConvention::FrameSize() const { + if (is_critical_native_) { + CHECK(!SpillsMethod()); + CHECK(!HasLocalReferenceSegmentState()); + CHECK(!HasHandleScope()); + CHECK(!SpillsReturnValue()); + return 0u; // There is no managed frame for @CriticalNative. + } + + // Method*, PC return address and callee save area size, local reference segment state + CHECK(SpillsMethod()); + const size_t method_ptr_size = static_cast(kX86_64PointerSize); + const size_t pc_return_addr_size = kFramePointerSize; + const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize; + size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size; + + CHECK(HasLocalReferenceSegmentState()); + total_size += kFramePointerSize; + + CHECK(HasHandleScope()); + total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount()); + + // Plus return value spill area size + CHECK(SpillsReturnValue()); + total_size += SizeOfReturnValue(); + + return RoundUp(total_size, kStackAlignment); +} + +size_t X86_64JniCallingConvention::OutArgSize() const { + // Count param args, including JNIEnv* and jclass*. 
+ size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs(); + size_t num_fp_args = NumFloatOrDoubleArgs(); + DCHECK_GE(all_args, num_fp_args); + size_t num_non_fp_args = all_args - num_fp_args; + // Account for FP arguments passed through Xmm0..Xmm7. + size_t num_stack_fp_args = + num_fp_args - std::min(kMaxFloatOrDoubleRegisterArguments, num_fp_args); + // Account for other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9). + size_t num_stack_non_fp_args = + num_non_fp_args - std::min(kMaxIntLikeRegisterArguments, num_non_fp_args); + // The size of outgoing arguments. + static_assert(kFramePointerSize == kMmxSpillSize); + size_t size = (num_stack_fp_args + num_stack_non_fp_args) * kFramePointerSize; + + if (UNLIKELY(IsCriticalNative())) { + // We always need to spill xmm12-xmm15 as they are managed callee-saves + // but not native callee-saves. + static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u); + static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) != 0u); + static_assert( + kAlwaysSpilledMmxRegisters == POPCOUNT(kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask)); + size += kAlwaysSpilledMmxRegisters * kMmxSpillSize; + // Add return address size for @CriticalNative + // For normal native the return PC is part of the managed stack frame instead of out args. + size += kFramePointerSize; + } + + size_t out_args_size = RoundUp(size, kNativeStackAlignment); + if (UNLIKELY(IsCriticalNative())) { + DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u)); + } + return out_args_size; +} + +ArrayRef X86_64JniCallingConvention::CalleeSaveRegisters() const { + if (UNLIKELY(IsCriticalNative())) { + DCHECK(!UseTailCall()); + static_assert(std::size(kCalleeSaveRegisters) > std::size(kNativeCalleeSaveRegisters)); + // TODO: Change to static_assert; std::equal should be constexpr since C++20. 
+ DCHECK(std::equal(kCalleeSaveRegisters, + kCalleeSaveRegisters + std::size(kNativeCalleeSaveRegisters), + kNativeCalleeSaveRegisters, + [](ManagedRegister lhs, ManagedRegister rhs) { return lhs.Equals(rhs); })); + return ArrayRef(kCalleeSaveRegisters).SubArray( + /*pos=*/ std::size(kNativeCalleeSaveRegisters)); + } else { + return ArrayRef(kCalleeSaveRegisters); + } +} + +bool X86_64JniCallingConvention::IsCurrentParamInRegister() { + return !IsCurrentParamOnStack(); +} + +bool X86_64JniCallingConvention::IsCurrentParamOnStack() { + return CurrentParamRegister().IsNoRegister(); +} + +ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() { + ManagedRegister res = ManagedRegister::NoRegister(); + if (!IsCurrentParamAFloatOrDouble()) { + switch (itr_args_ - itr_float_and_doubles_) { + case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break; + case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break; + case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break; + case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break; + case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break; + case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break; + static_assert(5u == kMaxIntLikeRegisterArguments - 1, "Missing case statement(s)"); + } + } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) { + // First eight float parameters are passed via XMM0..XMM7 + res = X86_64ManagedRegister::FromXmmRegister( + static_cast(XMM0 + itr_float_and_doubles_)); + } + return res; +} + +FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + size_t args_on_stack = itr_args_ + - std::min(kMaxFloatOrDoubleRegisterArguments, + static_cast(itr_float_and_doubles_)) + // Float arguments passed through Xmm0..Xmm7 + - std::min(kMaxIntLikeRegisterArguments, + static_cast(itr_args_ - itr_float_and_doubles_)); + // Integer arguments passed through GPR + size_t offset = 
displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize); + CHECK_LT(offset, OutArgSize()); + return FrameOffset(offset); +} + +ManagedRegister X86_64JniCallingConvention::HiddenArgumentRegister() const { + CHECK(IsCriticalNative()); + // R11 is neither managed callee-save, nor argument register, nor scratch register. + DCHECK(std::none_of(kCalleeSaveRegisters, + kCalleeSaveRegisters + std::size(kCalleeSaveRegisters), + [](ManagedRegister callee_save) constexpr { + return callee_save.Equals(X86_64ManagedRegister::FromCpuRegister(R11)); + })); + DCHECK(!InterproceduralScratchRegister().Equals(X86_64ManagedRegister::FromCpuRegister(R11))); + return X86_64ManagedRegister::FromCpuRegister(R11); +} + +// Whether to use tail call (used only for @CriticalNative). +bool X86_64JniCallingConvention::UseTailCall() const { + CHECK(IsCriticalNative()); + // We always need to spill xmm12-xmm15 as they are managed callee-saves + // but not native callee-saves, so we can never use a tail call. + return false; +} + +} // namespace x86_64 +} // namespace art diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h new file mode 100644 index 0000000..37b5978 --- /dev/null +++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_ +#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_ + +#include "base/enums.h" +#include "jni/quick/calling_convention.h" + +namespace art { +namespace x86_64 { + +class X86_64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention { + public: + X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) + : ManagedRuntimeCallingConvention(is_static, + is_synchronized, + shorty, + PointerSize::k64) {} + ~X86_64ManagedRuntimeCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // Managed runtime calling convention + ManagedRegister MethodRegister() override; + bool IsCurrentParamInRegister() override; + bool IsCurrentParamOnStack() override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + const ManagedRegisterEntrySpills& EntrySpills() override; + private: + ManagedRegisterEntrySpills entry_spills_; + DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention); +}; + +class X86_64JniCallingConvention final : public JniCallingConvention { + public: + X86_64JniCallingConvention(bool is_static, + bool is_synchronized, + bool is_critical_native, + const char* shorty); + ~X86_64JniCallingConvention() override {} + // Calling convention + ManagedRegister ReturnRegister() override; + ManagedRegister IntReturnRegister() override; + ManagedRegister InterproceduralScratchRegister() const override; + // JNI calling convention + size_t FrameSize() const override; + size_t OutArgSize() const override; + ArrayRef CalleeSaveRegisters() const override; + ManagedRegister ReturnScratchRegister() const override; + uint32_t CoreSpillMask() const override; + uint32_t FpSpillMask() const override; + bool IsCurrentParamInRegister() override; + bool 
IsCurrentParamOnStack() override; + ManagedRegister CurrentParamRegister() override; + FrameOffset CurrentParamStackOffset() override; + + // x86-64 needs to extend small return types. + bool RequiresSmallResultTypeExtension() const override { + return HasSmallReturnType(); + } + + // Hidden argument register, used to pass the method pointer for @CriticalNative call. + ManagedRegister HiddenArgumentRegister() const override; + + // Whether to use tail call (used only for @CriticalNative). + bool UseTailCall() const override; + + private: + DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention); +}; + +} // namespace x86_64 +} // namespace art + +#endif // ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_ diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h new file mode 100644 index 0000000..1c523de --- /dev/null +++ b/compiler/linker/linker_patch.h @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_LINKER_LINKER_PATCH_H_ +#define ART_COMPILER_LINKER_LINKER_PATCH_H_ + +#include +#include + +#include + +#include "base/bit_utils.h" +#include "dex/method_reference.h" + +namespace art { + +class DexFile; + +namespace linker { + +class LinkerPatch { + public: + // Note: We explicitly specify the underlying type of the enum because GCC + // would otherwise select a bigger underlying type and then complain that + // 'art::LinkerPatch::patch_type_' is too small to hold all + // values of 'enum class art::LinkerPatch::Type' + // which is ridiculous given we have only a handful of values here. If we + // choose to squeeze the Type into fewer than 8 bits, we'll have to declare + // patch_type_ as an uintN_t and do explicit static_cast<>s. + // + // Note: Actual patching is instruction_set-dependent. + enum class Type : uint8_t { + kIntrinsicReference, // Boot image reference for an intrinsic, see IntrinsicObjects. + kDataBimgRelRo, + kMethodRelative, + kMethodBssEntry, + kCallRelative, + kTypeRelative, + kTypeBssEntry, + kStringRelative, + kStringBssEntry, + kCallEntrypoint, + kBakerReadBarrierBranch, + }; + + static LinkerPatch IntrinsicReferencePatch(size_t literal_offset, + uint32_t pc_insn_offset, + uint32_t intrinsic_data) { + LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file= */ nullptr); + patch.intrinsic_data_ = intrinsic_data; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch DataBimgRelRoPatch(size_t literal_offset, + uint32_t pc_insn_offset, + uint32_t boot_image_offset) { + LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file= */ nullptr); + patch.boot_image_offset_ = boot_image_offset; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch RelativeMethodPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t target_method_idx) { + LinkerPatch patch(literal_offset, 
Type::kMethodRelative, target_dex_file); + patch.method_idx_ = target_method_idx; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch MethodBssEntryPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t target_method_idx) { + LinkerPatch patch(literal_offset, Type::kMethodBssEntry, target_dex_file); + patch.method_idx_ = target_method_idx; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch RelativeCodePatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t target_method_idx) { + LinkerPatch patch(literal_offset, Type::kCallRelative, target_dex_file); + patch.method_idx_ = target_method_idx; + return patch; + } + + static LinkerPatch RelativeTypePatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t target_type_idx) { + LinkerPatch patch(literal_offset, Type::kTypeRelative, target_dex_file); + patch.type_idx_ = target_type_idx; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch TypeBssEntryPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t target_type_idx) { + LinkerPatch patch(literal_offset, Type::kTypeBssEntry, target_dex_file); + patch.type_idx_ = target_type_idx; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch RelativeStringPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t target_string_idx) { + LinkerPatch patch(literal_offset, Type::kStringRelative, target_dex_file); + patch.string_idx_ = target_string_idx; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch StringBssEntryPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t target_string_idx) { + LinkerPatch patch(literal_offset, Type::kStringBssEntry, target_dex_file); + 
patch.string_idx_ = target_string_idx; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch CallEntrypointPatch(size_t literal_offset, + uint32_t entrypoint_offset) { + LinkerPatch patch(literal_offset, + Type::kCallEntrypoint, + /* target_dex_file= */ nullptr); + patch.entrypoint_offset_ = entrypoint_offset; + return patch; + } + + static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset, + uint32_t custom_value1 = 0u, + uint32_t custom_value2 = 0u) { + LinkerPatch patch(literal_offset, + Type::kBakerReadBarrierBranch, + /* target_dex_file= */ nullptr); + patch.baker_custom_value1_ = custom_value1; + patch.baker_custom_value2_ = custom_value2; + return patch; + } + + LinkerPatch(const LinkerPatch& other) = default; + LinkerPatch& operator=(const LinkerPatch& other) = default; + + size_t LiteralOffset() const { + return literal_offset_; + } + + Type GetType() const { + return patch_type_; + } + + uint32_t IntrinsicData() const { + DCHECK(patch_type_ == Type::kIntrinsicReference); + return intrinsic_data_; + } + + uint32_t BootImageOffset() const { + DCHECK(patch_type_ == Type::kDataBimgRelRo); + return boot_image_offset_; + } + + MethodReference TargetMethod() const { + DCHECK(patch_type_ == Type::kMethodRelative || + patch_type_ == Type::kMethodBssEntry || + patch_type_ == Type::kCallRelative); + return MethodReference(target_dex_file_, method_idx_); + } + + const DexFile* TargetTypeDexFile() const { + DCHECK(patch_type_ == Type::kTypeRelative || + patch_type_ == Type::kTypeBssEntry); + return target_dex_file_; + } + + dex::TypeIndex TargetTypeIndex() const { + DCHECK(patch_type_ == Type::kTypeRelative || + patch_type_ == Type::kTypeBssEntry); + return dex::TypeIndex(type_idx_); + } + + const DexFile* TargetStringDexFile() const { + DCHECK(patch_type_ == Type::kStringRelative || + patch_type_ == Type::kStringBssEntry); + return target_dex_file_; + } + + dex::StringIndex TargetStringIndex() const { + DCHECK(patch_type_ == 
Type::kStringRelative || + patch_type_ == Type::kStringBssEntry); + return dex::StringIndex(string_idx_); + } + + uint32_t PcInsnOffset() const { + DCHECK(patch_type_ == Type::kIntrinsicReference || + patch_type_ == Type::kDataBimgRelRo || + patch_type_ == Type::kMethodRelative || + patch_type_ == Type::kMethodBssEntry || + patch_type_ == Type::kTypeRelative || + patch_type_ == Type::kTypeBssEntry || + patch_type_ == Type::kStringRelative || + patch_type_ == Type::kStringBssEntry); + return pc_insn_offset_; + } + + uint32_t EntrypointOffset() const { + DCHECK(patch_type_ == Type::kCallEntrypoint); + return entrypoint_offset_; + } + + uint32_t GetBakerCustomValue1() const { + DCHECK(patch_type_ == Type::kBakerReadBarrierBranch); + return baker_custom_value1_; + } + + uint32_t GetBakerCustomValue2() const { + DCHECK(patch_type_ == Type::kBakerReadBarrierBranch); + return baker_custom_value2_; + } + + private: + LinkerPatch(size_t literal_offset, Type patch_type, const DexFile* target_dex_file) + : target_dex_file_(target_dex_file), + literal_offset_(literal_offset), + patch_type_(patch_type) { + cmp1_ = 0u; + cmp2_ = 0u; + // The compiler rejects methods that are too big, so the compiled code + // of a single method really shouln't be anywhere close to 16MiB. + DCHECK(IsUint<24>(literal_offset)); + } + + const DexFile* target_dex_file_; + // TODO: Clean up naming. Some patched locations are literals but others are not. + uint32_t literal_offset_ : 24; // Method code size up to 16MiB. + Type patch_type_ : 8; + union { + uint32_t cmp1_; // Used for relational operators. + uint32_t boot_image_offset_; // Data to write to the .data.bimg.rel.ro entry. + uint32_t method_idx_; // Method index for Call/Method patches. + uint32_t type_idx_; // Type index for Type patches. + uint32_t string_idx_; // String index for String patches. + uint32_t intrinsic_data_; // Data for IntrinsicObjects. + uint32_t entrypoint_offset_; // Entrypoint offset in the Thread object. 
+ uint32_t baker_custom_value1_; + static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators"); + static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators"); + static_assert(sizeof(string_idx_) == sizeof(cmp1_), "needed by relational operators"); + static_assert(sizeof(intrinsic_data_) == sizeof(cmp1_), "needed by relational operators"); + static_assert(sizeof(baker_custom_value1_) == sizeof(cmp1_), "needed by relational operators"); + }; + union { + // Note: To avoid uninitialized padding on 64-bit systems, we use `size_t` for `cmp2_`. + // This allows a hashing function to treat an array of linker patches as raw memory. + size_t cmp2_; // Used for relational operators. + // Literal offset of the insn loading PC (same as literal_offset if it's the same insn, + // may be different if the PC-relative addressing needs multiple insns). + uint32_t pc_insn_offset_; + uint32_t baker_custom_value2_; + static_assert(sizeof(pc_insn_offset_) <= sizeof(cmp2_), "needed by relational operators"); + static_assert(sizeof(baker_custom_value2_) <= sizeof(cmp2_), "needed by relational operators"); + }; + + friend bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs); + friend bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs); +}; +std::ostream& operator<<(std::ostream& os, const LinkerPatch::Type& type); + +inline bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs) { + return lhs.literal_offset_ == rhs.literal_offset_ && + lhs.patch_type_ == rhs.patch_type_ && + lhs.target_dex_file_ == rhs.target_dex_file_ && + lhs.cmp1_ == rhs.cmp1_ && + lhs.cmp2_ == rhs.cmp2_; +} + +inline bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs) { + return (lhs.literal_offset_ != rhs.literal_offset_) ? lhs.literal_offset_ < rhs.literal_offset_ + : (lhs.patch_type_ != rhs.patch_type_) ? lhs.patch_type_ < rhs.patch_type_ + : (lhs.target_dex_file_ != rhs.target_dex_file_) ? 
lhs.target_dex_file_ < rhs.target_dex_file_ + : (lhs.cmp1_ != rhs.cmp1_) ? lhs.cmp1_ < rhs.cmp1_ + : lhs.cmp2_ < rhs.cmp2_; +} + +} // namespace linker +} // namespace art + +#endif // ART_COMPILER_LINKER_LINKER_PATCH_H_ diff --git a/compiler/linker/linker_patch_test.cc b/compiler/linker/linker_patch_test.cc new file mode 100644 index 0000000..997418c --- /dev/null +++ b/compiler/linker/linker_patch_test.cc @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "linker_patch.h" + +namespace art { +namespace linker { + +TEST(LinkerPatch, LinkerPatchOperators) { + const DexFile* dex_file1 = reinterpret_cast(1); + const DexFile* dex_file2 = reinterpret_cast(2); + LinkerPatch patches[] = { + LinkerPatch::IntrinsicReferencePatch(16u, 3000u, 1000u), + LinkerPatch::IntrinsicReferencePatch(16u, 3001u, 1000u), + LinkerPatch::IntrinsicReferencePatch(16u, 3000u, 1001u), + LinkerPatch::IntrinsicReferencePatch(16u, 3001u, 1001u), + LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1000u), + LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1000u), + LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1001u), + LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Index 7. 
+ LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1000u), + LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1000u), + LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1001u), + LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1001u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3000u, 1000u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3001u, 1000u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3000u, 1001u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3001u, 1001u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3000u, 1000u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1000u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3000u, 1001u), + LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1001u), + LinkerPatch::RelativeCodePatch(16u, dex_file1, 1000u), + LinkerPatch::RelativeCodePatch(16u, dex_file1, 1001u), + LinkerPatch::RelativeCodePatch(16u, dex_file2, 1000u), + LinkerPatch::RelativeCodePatch(16u, dex_file2, 1001u), + LinkerPatch::RelativeTypePatch(16u, dex_file1, 3000u, 1000u), + LinkerPatch::RelativeTypePatch(16u, dex_file1, 3001u, 1000u), + LinkerPatch::RelativeTypePatch(16u, dex_file1, 3000u, 1001u), + LinkerPatch::RelativeTypePatch(16u, dex_file1, 3001u, 1001u), + LinkerPatch::RelativeTypePatch(16u, dex_file2, 3000u, 1000u), + LinkerPatch::RelativeTypePatch(16u, dex_file2, 3001u, 1000u), + LinkerPatch::RelativeTypePatch(16u, dex_file2, 3000u, 1001u), + LinkerPatch::RelativeTypePatch(16u, dex_file2, 3001u, 1001u), + LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3000u, 1000u), + LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3001u, 1000u), + LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3000u, 1001u), + LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3001u, 1001u), + LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3000u, 1000u), + LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3001u, 1000u), + LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3000u, 1001u), + 
LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3001u, 1001u), + LinkerPatch::RelativeStringPatch(16u, dex_file1, 3000u, 1000u), + LinkerPatch::RelativeStringPatch(16u, dex_file1, 3001u, 1000u), + LinkerPatch::RelativeStringPatch(16u, dex_file1, 3000u, 1001u), + LinkerPatch::RelativeStringPatch(16u, dex_file1, 3001u, 1001u), + LinkerPatch::RelativeStringPatch(16u, dex_file2, 3000u, 1000u), + LinkerPatch::RelativeStringPatch(16u, dex_file2, 3001u, 1000u), + LinkerPatch::RelativeStringPatch(16u, dex_file2, 3000u, 1001u), + LinkerPatch::RelativeStringPatch(16u, dex_file2, 3001u, 1001u), + LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3000u, 1000u), + LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3001u, 1000u), + LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3000u, 1001u), + LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3001u, 1001u), + LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3000u, 1000u), + LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1000u), + LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3000u, 1001u), + LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1001u), + LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 0u), + LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 1u), + LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 0u), + LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 1u), + + LinkerPatch::IntrinsicReferencePatch(32u, 3000u, 1000u), + LinkerPatch::IntrinsicReferencePatch(32u, 3001u, 1000u), + LinkerPatch::IntrinsicReferencePatch(32u, 3000u, 1001u), + LinkerPatch::IntrinsicReferencePatch(32u, 3001u, 1001u), + LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1000u), + LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3001u, 1000u), + LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1001u), + LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3001u, 1001u), + LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3000u, 1000u), + LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1000u), + 
LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3000u, 1001u), + LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1001u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3000u, 1000u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3001u, 1000u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3000u, 1001u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3001u, 1001u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3000u, 1000u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1000u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3000u, 1001u), + LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1001u), + LinkerPatch::RelativeCodePatch(32u, dex_file1, 1000u), + LinkerPatch::RelativeCodePatch(32u, dex_file1, 1001u), + LinkerPatch::RelativeCodePatch(32u, dex_file2, 1000u), + LinkerPatch::RelativeCodePatch(32u, dex_file2, 1001u), + LinkerPatch::RelativeTypePatch(32u, dex_file1, 3000u, 1000u), + LinkerPatch::RelativeTypePatch(32u, dex_file1, 3001u, 1000u), + LinkerPatch::RelativeTypePatch(32u, dex_file1, 3000u, 1001u), + LinkerPatch::RelativeTypePatch(32u, dex_file1, 3001u, 1001u), + LinkerPatch::RelativeTypePatch(32u, dex_file2, 3000u, 1000u), + LinkerPatch::RelativeTypePatch(32u, dex_file2, 3001u, 1000u), + LinkerPatch::RelativeTypePatch(32u, dex_file2, 3000u, 1001u), + LinkerPatch::RelativeTypePatch(32u, dex_file2, 3001u, 1001u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3000u, 1000u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3001u, 1000u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3000u, 1001u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3001u, 1001u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3000u, 1000u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3001u, 1000u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3000u, 1001u), + LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3001u, 1001u), + LinkerPatch::RelativeStringPatch(32u, dex_file1, 3000u, 1000u), + 
LinkerPatch::RelativeStringPatch(32u, dex_file1, 3001u, 1000u), + LinkerPatch::RelativeStringPatch(32u, dex_file1, 3000u, 1001u), + LinkerPatch::RelativeStringPatch(32u, dex_file1, 3001u, 1001u), + LinkerPatch::RelativeStringPatch(32u, dex_file2, 3000u, 1000u), + LinkerPatch::RelativeStringPatch(32u, dex_file2, 3001u, 1000u), + LinkerPatch::RelativeStringPatch(32u, dex_file2, 3000u, 1001u), + LinkerPatch::RelativeStringPatch(32u, dex_file2, 3001u, 1001u), + LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3000u, 1000u), + LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3001u, 1000u), + LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3000u, 1001u), + LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3001u, 1001u), + LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3000u, 1000u), + LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1000u), + LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3000u, 1001u), + LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1001u), + LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 0u), + LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 1u), + LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 0u), + LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 1u), + + LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Same as patch at index 7. + }; + constexpr size_t last_index = arraysize(patches) - 1u; + + for (size_t i = 0; i != arraysize(patches); ++i) { + for (size_t j = 0; j != arraysize(patches); ++j) { + bool expected = (i != last_index ? i : 7u) == (j != last_index ? j : 7u); + EXPECT_EQ(expected, patches[i] == patches[j]) << i << " " << j; + } + } + + for (size_t i = 0; i != arraysize(patches); ++i) { + for (size_t j = 0; j != arraysize(patches); ++j) { + bool expected = (i != last_index ? i : 7u) < (j != last_index ? 
j : 7u); + EXPECT_EQ(expected, patches[i] < patches[j]) << i << " " << j; + } + } +} + +} // namespace linker +} // namespace art diff --git a/compiler/linker/output_stream_test.cc b/compiler/linker/output_stream_test.cc new file mode 100644 index 0000000..00231b1 --- /dev/null +++ b/compiler/linker/output_stream_test.cc @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "base/macros.h" +#include "base/unix_file/fd_file.h" +#include "common_runtime_test.h" +#include "stream/buffered_output_stream.h" +#include "stream/file_output_stream.h" +#include "stream/vector_output_stream.h" + +namespace art { +namespace linker { + +class OutputStreamTest : public CommonRuntimeTest { + protected: + void CheckOffset(off_t expected) { + off_t actual = output_stream_->Seek(0, kSeekCurrent); + EXPECT_EQ(expected, actual); + } + + void SetOutputStream(OutputStream& output_stream) { + output_stream_ = &output_stream; + } + + void GenerateTestOutput() { + EXPECT_EQ(3, output_stream_->Seek(3, kSeekCurrent)); + CheckOffset(3); + EXPECT_EQ(2, output_stream_->Seek(2, kSeekSet)); + CheckOffset(2); + uint8_t buf[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + EXPECT_TRUE(output_stream_->WriteFully(buf, 2)); + CheckOffset(4); + EXPECT_EQ(6, output_stream_->Seek(2, kSeekEnd)); + CheckOffset(6); + EXPECT_TRUE(output_stream_->WriteFully(buf, 4)); + CheckOffset(10); + 
EXPECT_TRUE(output_stream_->WriteFully(buf, 6)); + EXPECT_TRUE(output_stream_->Flush()); + } + + void CheckTestOutput(const std::vector& actual) { + uint8_t expected[] = { + 0, 0, 1, 2, 0, 0, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6 + }; + EXPECT_EQ(sizeof(expected), actual.size()); + EXPECT_EQ(0, memcmp(expected, &actual[0], actual.size())); + } + + OutputStream* output_stream_; +}; + +TEST_F(OutputStreamTest, File) { + ScratchFile tmp; + FileOutputStream output_stream(tmp.GetFile()); + SetOutputStream(output_stream); + GenerateTestOutput(); + std::unique_ptr in(OS::OpenFileForReading(tmp.GetFilename().c_str())); + EXPECT_TRUE(in.get() != nullptr); + std::vector actual(in->GetLength()); + bool readSuccess = in->ReadFully(&actual[0], actual.size()); + EXPECT_TRUE(readSuccess); + CheckTestOutput(actual); +} + +TEST_F(OutputStreamTest, Buffered) { + ScratchFile tmp; + { + BufferedOutputStream buffered_output_stream(std::make_unique(tmp.GetFile())); + SetOutputStream(buffered_output_stream); + GenerateTestOutput(); + } + std::unique_ptr in(OS::OpenFileForReading(tmp.GetFilename().c_str())); + EXPECT_TRUE(in.get() != nullptr); + std::vector actual(in->GetLength()); + bool readSuccess = in->ReadFully(&actual[0], actual.size()); + EXPECT_TRUE(readSuccess); + CheckTestOutput(actual); +} + +TEST_F(OutputStreamTest, Vector) { + std::vector output; + VectorOutputStream output_stream("test vector output", &output); + SetOutputStream(output_stream); + GenerateTestOutput(); + CheckTestOutput(output); +} + +TEST_F(OutputStreamTest, BufferedFlush) { + struct CheckingOutputStream : OutputStream { + CheckingOutputStream() + : OutputStream("dummy"), + flush_called(false) { } + ~CheckingOutputStream() override {} + + bool WriteFully(const void* buffer ATTRIBUTE_UNUSED, + size_t byte_count ATTRIBUTE_UNUSED) override { + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } + + off_t Seek(off_t offset ATTRIBUTE_UNUSED, Whence whence ATTRIBUTE_UNUSED) override { + LOG(FATAL) << "UNREACHABLE"; + 
UNREACHABLE(); + } + + bool Flush() override { + flush_called = true; + return true; + } + + bool flush_called; + }; + + std::unique_ptr cos = std::make_unique(); + CheckingOutputStream* checking_output_stream = cos.get(); + BufferedOutputStream buffered(std::move(cos)); + ASSERT_FALSE(checking_output_stream->flush_called); + bool flush_result = buffered.Flush(); + ASSERT_TRUE(flush_result); + ASSERT_TRUE(checking_output_stream->flush_called); +} + +} // namespace linker +} // namespace art diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc new file mode 100644 index 0000000..e1f061a --- /dev/null +++ b/compiler/optimizing/block_builder.cc @@ -0,0 +1,490 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "block_builder.h" + +#include "base/logging.h" // FOR VLOG. +#include "dex/bytecode_utils.h" +#include "dex/code_item_accessors-inl.h" +#include "dex/dex_file_exception_helpers.h" +#include "quicken_info.h" + +namespace art { + +HBasicBlockBuilder::HBasicBlockBuilder(HGraph* graph, + const DexFile* const dex_file, + const CodeItemDebugInfoAccessor& accessor, + ScopedArenaAllocator* local_allocator) + : allocator_(graph->GetAllocator()), + graph_(graph), + dex_file_(dex_file), + code_item_accessor_(accessor), + local_allocator_(local_allocator), + branch_targets_(code_item_accessor_.HasCodeItem() + ? 
code_item_accessor_.InsnsSizeInCodeUnits() + : /* fake dex_pc=0 for intrinsic graph */ 1u, + nullptr, + local_allocator->Adapter(kArenaAllocGraphBuilder)), + throwing_blocks_(kDefaultNumberOfThrowingBlocks, + local_allocator->Adapter(kArenaAllocGraphBuilder)), + number_of_branches_(0u), + quicken_index_for_dex_pc_(std::less(), + local_allocator->Adapter(kArenaAllocGraphBuilder)) {} + +HBasicBlock* HBasicBlockBuilder::MaybeCreateBlockAt(uint32_t dex_pc) { + return MaybeCreateBlockAt(dex_pc, dex_pc); +} + +HBasicBlock* HBasicBlockBuilder::MaybeCreateBlockAt(uint32_t semantic_dex_pc, + uint32_t store_dex_pc) { + HBasicBlock* block = branch_targets_[store_dex_pc]; + if (block == nullptr) { + block = new (allocator_) HBasicBlock(graph_, semantic_dex_pc); + branch_targets_[store_dex_pc] = block; + } + DCHECK_EQ(block->GetDexPc(), semantic_dex_pc); + return block; +} + +bool HBasicBlockBuilder::CreateBranchTargets() { + // Create the first block for the dex instructions, single successor of the entry block. + MaybeCreateBlockAt(0u); + + if (code_item_accessor_.TriesSize() != 0) { + // Create branch targets at the start/end of the TryItem range. These are + // places where the program might fall through into/out of the a block and + // where TryBoundary instructions will be inserted later. Other edges which + // enter/exit the try blocks are a result of branches/switches. + for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) { + uint32_t dex_pc_start = try_item.start_addr_; + uint32_t dex_pc_end = dex_pc_start + try_item.insn_count_; + MaybeCreateBlockAt(dex_pc_start); + if (dex_pc_end < code_item_accessor_.InsnsSizeInCodeUnits()) { + // TODO: Do not create block if the last instruction cannot fall through. + MaybeCreateBlockAt(dex_pc_end); + } else if (dex_pc_end == code_item_accessor_.InsnsSizeInCodeUnits()) { + // The TryItem spans until the very end of the CodeItem and therefore + // cannot have any code afterwards. 
+ } else { + // The TryItem spans beyond the end of the CodeItem. This is invalid code. + VLOG(compiler) << "Not compiled: TryItem spans beyond the end of the CodeItem"; + return false; + } + } + + // Create branch targets for exception handlers. + const uint8_t* handlers_ptr = code_item_accessor_.GetCatchHandlerData(); + uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr); + for (uint32_t idx = 0; idx < handlers_size; ++idx) { + CatchHandlerIterator iterator(handlers_ptr); + for (; iterator.HasNext(); iterator.Next()) { + MaybeCreateBlockAt(iterator.GetHandlerAddress()); + } + handlers_ptr = iterator.EndDataPointer(); + } + } + + // Iterate over all instructions and find branching instructions. Create blocks for + // the locations these instructions branch to. + for (const DexInstructionPcPair& pair : code_item_accessor_) { + const uint32_t dex_pc = pair.DexPc(); + const Instruction& instruction = pair.Inst(); + + if (instruction.IsBranch()) { + number_of_branches_++; + MaybeCreateBlockAt(dex_pc + instruction.GetTargetOffset()); + } else if (instruction.IsSwitch()) { + number_of_branches_++; // count as at least one branch (b/77652521) + DexSwitchTable table(instruction, dex_pc); + for (DexSwitchTableIterator s_it(table); !s_it.Done(); s_it.Advance()) { + MaybeCreateBlockAt(dex_pc + s_it.CurrentTargetOffset()); + + // Create N-1 blocks where we will insert comparisons of the input value + // against the Switch's case keys. + if (table.ShouldBuildDecisionTree() && !s_it.IsLast()) { + // Store the block under dex_pc of the current key at the switch data + // instruction for uniqueness but give it the dex_pc of the SWITCH + // instruction which it semantically belongs to. + MaybeCreateBlockAt(dex_pc, s_it.GetDexPcForCurrentIndex()); + } + } + } else if (instruction.Opcode() == Instruction::MOVE_EXCEPTION) { + // End the basic block after MOVE_EXCEPTION. This simplifies the later + // stage of TryBoundary-block insertion. 
+ } else { + continue; + } + + if (instruction.CanFlowThrough()) { + DexInstructionIterator next(std::next(DexInstructionIterator(pair))); + if (next == code_item_accessor_.end()) { + // In the normal case we should never hit this but someone can artificially forge a dex + // file to fall-through out the method code. In this case we bail out compilation. + VLOG(compiler) << "Not compiled: Fall-through beyond the CodeItem"; + return false; + } + MaybeCreateBlockAt(next.DexPc()); + } + } + + return true; +} + +void HBasicBlockBuilder::ConnectBasicBlocks() { + HBasicBlock* block = graph_->GetEntryBlock(); + graph_->AddBlock(block); + + size_t quicken_index = 0; + bool is_throwing_block = false; + // Calculate the qucikening index here instead of CreateBranchTargets since it's easier to + // calculate in dex_pc order. + for (const DexInstructionPcPair& pair : code_item_accessor_) { + const uint32_t dex_pc = pair.DexPc(); + const Instruction& instruction = pair.Inst(); + + // Check if this dex_pc address starts a new basic block. + HBasicBlock* next_block = GetBlockAt(dex_pc); + if (next_block != nullptr) { + // We only need quicken index entries for basic block boundaries. + quicken_index_for_dex_pc_.Put(dex_pc, quicken_index); + if (block != nullptr) { + // Last instruction did not end its basic block but a new one starts here. + // It must have been a block falling through into the next one. + block->AddSuccessor(next_block); + } + block = next_block; + is_throwing_block = false; + graph_->AddBlock(block); + } + // Make sure to increment this before the continues. + if (QuickenInfoTable::NeedsIndexForInstruction(&instruction)) { + ++quicken_index; + } + + if (block == nullptr) { + // Ignore dead code. 
+ continue; + } + + if (!is_throwing_block && IsThrowingDexInstruction(instruction)) { + DCHECK(!ContainsElement(throwing_blocks_, block)); + is_throwing_block = true; + throwing_blocks_.push_back(block); + } + + if (instruction.IsBranch()) { + uint32_t target_dex_pc = dex_pc + instruction.GetTargetOffset(); + block->AddSuccessor(GetBlockAt(target_dex_pc)); + } else if (instruction.IsReturn() || (instruction.Opcode() == Instruction::THROW)) { + block->AddSuccessor(graph_->GetExitBlock()); + } else if (instruction.IsSwitch()) { + DexSwitchTable table(instruction, dex_pc); + for (DexSwitchTableIterator s_it(table); !s_it.Done(); s_it.Advance()) { + uint32_t target_dex_pc = dex_pc + s_it.CurrentTargetOffset(); + block->AddSuccessor(GetBlockAt(target_dex_pc)); + + if (table.ShouldBuildDecisionTree() && !s_it.IsLast()) { + uint32_t next_case_dex_pc = s_it.GetDexPcForCurrentIndex(); + HBasicBlock* next_case_block = GetBlockAt(next_case_dex_pc); + block->AddSuccessor(next_case_block); + block = next_case_block; + graph_->AddBlock(block); + } + } + } else { + // Remaining code only applies to instructions which end their basic block. + continue; + } + + // Go to the next instruction in case we read dex PC below. + if (instruction.CanFlowThrough()) { + block->AddSuccessor(GetBlockAt(std::next(DexInstructionIterator(pair)).DexPc())); + } + + // The basic block ends here. Do not add any more instructions. + block = nullptr; + } + + graph_->AddBlock(graph_->GetExitBlock()); +} + +// Returns the TryItem stored for `block` or nullptr if there is no info for it. +static const dex::TryItem* GetTryItem( + HBasicBlock* block, + const ScopedArenaSafeMap& try_block_info) { + auto iterator = try_block_info.find(block->GetBlockId()); + return (iterator == try_block_info.end()) ? nullptr : iterator->second; +} + +// Iterates over the exception handlers of `try_item`, finds the corresponding +// catch blocks and makes them successors of `try_boundary`. 
The order of +// successors matches the order in which runtime exception delivery searches +// for a handler. +static void LinkToCatchBlocks(HTryBoundary* try_boundary, + const CodeItemDataAccessor& accessor, + const dex::TryItem* try_item, + const ScopedArenaSafeMap& catch_blocks) { + for (CatchHandlerIterator it(accessor.GetCatchHandlerData(try_item->handler_off_)); + it.HasNext(); + it.Next()) { + try_boundary->AddExceptionHandler(catch_blocks.Get(it.GetHandlerAddress())); + } +} + +bool HBasicBlockBuilder::MightHaveLiveNormalPredecessors(HBasicBlock* catch_block) { + if (kIsDebugBuild) { + DCHECK_NE(catch_block->GetDexPc(), kNoDexPc) << "Should not be called on synthetic blocks"; + DCHECK(!graph_->GetEntryBlock()->GetSuccessors().empty()) + << "Basic blocks must have been created and connected"; + for (HBasicBlock* predecessor : catch_block->GetPredecessors()) { + DCHECK(!predecessor->IsSingleTryBoundary()) + << "TryBoundary blocks must not have not been created yet"; + } + } + + const Instruction& first = code_item_accessor_.InstructionAt(catch_block->GetDexPc()); + if (first.Opcode() == Instruction::MOVE_EXCEPTION) { + // Verifier guarantees that if a catch block begins with MOVE_EXCEPTION then + // it has no live normal predecessors. + return false; + } else if (catch_block->GetPredecessors().empty()) { + // Normal control-flow edges have already been created. Since block's list of + // predecessors is empty, it cannot have any live or dead normal predecessors. + return false; + } + + // The catch block has normal predecessors but we do not know which are live + // and which will be removed during the initial DCE. Return `true` to signal + // that it may have live normal predecessors. + return true; +} + +void HBasicBlockBuilder::InsertTryBoundaryBlocks() { + if (code_item_accessor_.TriesSize() == 0) { + return; + } + + // Keep a map of all try blocks and their respective TryItems. 
We do not use + // the block's pointer but rather its id to ensure deterministic iteration. + ScopedArenaSafeMap try_block_info( + std::less(), local_allocator_->Adapter(kArenaAllocGraphBuilder)); + + // Obtain TryItem information for blocks with throwing instructions, and split + // blocks which are both try & catch to simplify the graph. + for (HBasicBlock* block : graph_->GetBlocks()) { + if (block->GetDexPc() == kNoDexPc) { + continue; + } + + // Do not bother creating exceptional edges for try blocks which have no + // throwing instructions. In that case we simply assume that the block is + // not covered by a TryItem. This prevents us from creating a throw-catch + // loop for synchronized blocks. + if (ContainsElement(throwing_blocks_, block)) { + // Try to find a TryItem covering the block. + const dex::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc()); + if (try_item != nullptr) { + // Block throwing and in a TryItem. Store the try block information. + try_block_info.Put(block->GetBlockId(), try_item); + } + } + } + + // Map from a handler dex_pc to the corresponding catch block. + ScopedArenaSafeMap catch_blocks( + std::less(), local_allocator_->Adapter(kArenaAllocGraphBuilder)); + + // Iterate over catch blocks, create artifical landing pads if necessary to + // simplify the CFG, and set metadata. + const uint8_t* handlers_ptr = code_item_accessor_.GetCatchHandlerData(); + uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr); + for (uint32_t idx = 0; idx < handlers_size; ++idx) { + CatchHandlerIterator iterator(handlers_ptr); + for (; iterator.HasNext(); iterator.Next()) { + uint32_t address = iterator.GetHandlerAddress(); + auto existing = catch_blocks.find(address); + if (existing != catch_blocks.end()) { + // Catch block already processed. + TryCatchInformation* info = existing->second->GetTryCatchInformation(); + if (iterator.GetHandlerTypeIndex() != info->GetCatchTypeIndex()) { + // The handler is for multiple types. 
We could record all the types, but + // doing class resolution here isn't ideal, and it's unclear whether wasting + // the space in TryCatchInformation is worth it. + info->SetInvalidTypeIndex(); + } + continue; + } + + // Check if we should create an artifical landing pad for the catch block. + // We create one if the catch block is also a try block because we do not + // have a strategy for inserting TryBoundaries on exceptional edges. + // We also create one if the block might have normal predecessors so as to + // simplify register allocation. + HBasicBlock* catch_block = GetBlockAt(address); + bool is_try_block = (try_block_info.find(catch_block->GetBlockId()) != try_block_info.end()); + if (is_try_block || MightHaveLiveNormalPredecessors(catch_block)) { + HBasicBlock* new_catch_block = new (allocator_) HBasicBlock(graph_, address); + new_catch_block->AddInstruction(new (allocator_) HGoto(address)); + new_catch_block->AddSuccessor(catch_block); + graph_->AddBlock(new_catch_block); + catch_block = new_catch_block; + } + + catch_blocks.Put(address, catch_block); + catch_block->SetTryCatchInformation( + new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_)); + } + handlers_ptr = iterator.EndDataPointer(); + } + + // Do a pass over the try blocks and insert entering TryBoundaries where at + // least one predecessor is not covered by the same TryItem as the try block. + // We do not split each edge separately, but rather create one boundary block + // that all predecessors are relinked to. This preserves loop headers (b/23895756). + for (const auto& entry : try_block_info) { + uint32_t block_id = entry.first; + const dex::TryItem* try_item = entry.second; + HBasicBlock* try_block = graph_->GetBlocks()[block_id]; + for (HBasicBlock* predecessor : try_block->GetPredecessors()) { + if (GetTryItem(predecessor, try_block_info) != try_item) { + // Found a predecessor not covered by the same TryItem. Insert entering + // boundary block. 
+ HTryBoundary* try_entry = new (allocator_) HTryBoundary( + HTryBoundary::BoundaryKind::kEntry, try_block->GetDexPc()); + try_block->CreateImmediateDominator()->AddInstruction(try_entry); + LinkToCatchBlocks(try_entry, code_item_accessor_, try_item, catch_blocks); + break; + } + } + } + + // Do a second pass over the try blocks and insert exit TryBoundaries where + // the successor is not in the same TryItem. + for (const auto& entry : try_block_info) { + uint32_t block_id = entry.first; + const dex::TryItem* try_item = entry.second; + HBasicBlock* try_block = graph_->GetBlocks()[block_id]; + // NOTE: Do not use iterators because SplitEdge would invalidate them. + for (size_t i = 0, e = try_block->GetSuccessors().size(); i < e; ++i) { + HBasicBlock* successor = try_block->GetSuccessors()[i]; + + // If the successor is a try block, all of its predecessors must be + // covered by the same TryItem. Otherwise the previous pass would have + // created a non-throwing boundary block. + if (GetTryItem(successor, try_block_info) != nullptr) { + DCHECK_EQ(try_item, GetTryItem(successor, try_block_info)); + continue; + } + + // Insert TryBoundary and link to catch blocks. + HTryBoundary* try_exit = + new (allocator_) HTryBoundary(HTryBoundary::BoundaryKind::kExit, successor->GetDexPc()); + graph_->SplitEdge(try_block, successor)->AddInstruction(try_exit); + LinkToCatchBlocks(try_exit, code_item_accessor_, try_item, catch_blocks); + } + } +} + +void HBasicBlockBuilder::InsertSynthesizedLoopsForOsr() { + ArenaSet targets(allocator_->Adapter(kArenaAllocGraphBuilder)); + // Collect basic blocks that are targets of a negative branch. 
+ for (const DexInstructionPcPair& pair : code_item_accessor_) { + const uint32_t dex_pc = pair.DexPc(); + const Instruction& instruction = pair.Inst(); + if (instruction.IsBranch()) { + uint32_t target_dex_pc = dex_pc + instruction.GetTargetOffset(); + if (target_dex_pc < dex_pc) { + HBasicBlock* block = GetBlockAt(target_dex_pc); + CHECK_NE(kNoDexPc, block->GetDexPc()); + targets.insert(block->GetBlockId()); + } + } else if (instruction.IsSwitch()) { + DexSwitchTable table(instruction, dex_pc); + for (DexSwitchTableIterator s_it(table); !s_it.Done(); s_it.Advance()) { + uint32_t target_dex_pc = dex_pc + s_it.CurrentTargetOffset(); + if (target_dex_pc < dex_pc) { + HBasicBlock* block = GetBlockAt(target_dex_pc); + CHECK_NE(kNoDexPc, block->GetDexPc()); + targets.insert(block->GetBlockId()); + } + } + } + } + + // Insert synthesized loops before the collected blocks. + for (uint32_t block_id : targets) { + HBasicBlock* block = graph_->GetBlocks()[block_id]; + HBasicBlock* loop_block = new (allocator_) HBasicBlock(graph_, block->GetDexPc()); + graph_->AddBlock(loop_block); + while (!block->GetPredecessors().empty()) { + block->GetPredecessors()[0]->ReplaceSuccessor(block, loop_block); + } + loop_block->AddSuccessor(loop_block); + loop_block->AddSuccessor(block); + // We loop on false - we know this won't be optimized later on as the loop + // is marked irreducible, which disables loop optimizations. + loop_block->AddInstruction(new (allocator_) HIf(graph_->GetIntConstant(0), kNoDexPc)); + } +} + +bool HBasicBlockBuilder::Build() { + DCHECK(code_item_accessor_.HasCodeItem()); + DCHECK(graph_->GetBlocks().empty()); + + graph_->SetEntryBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc)); + graph_->SetExitBlock(new (allocator_) HBasicBlock(graph_, kNoDexPc)); + + // TODO(dbrazdil): Do CreateBranchTargets and ConnectBasicBlocks in one pass. 
+ if (!CreateBranchTargets()) { + return false; + } + + ConnectBasicBlocks(); + InsertTryBoundaryBlocks(); + + if (graph_->IsCompilingOsr()) { + InsertSynthesizedLoopsForOsr(); + } + + return true; +} + +void HBasicBlockBuilder::BuildIntrinsic() { + DCHECK(!code_item_accessor_.HasCodeItem()); + DCHECK(graph_->GetBlocks().empty()); + + // Create blocks. + HBasicBlock* entry_block = new (allocator_) HBasicBlock(graph_, kNoDexPc); + HBasicBlock* exit_block = new (allocator_) HBasicBlock(graph_, kNoDexPc); + HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc= */ kNoDexPc, /* store_dex_pc= */ 0u); + + // Add blocks to the graph. + graph_->AddBlock(entry_block); + graph_->AddBlock(body); + graph_->AddBlock(exit_block); + graph_->SetEntryBlock(entry_block); + graph_->SetExitBlock(exit_block); + + // Connect blocks. + entry_block->AddSuccessor(body); + body->AddSuccessor(exit_block); +} + +size_t HBasicBlockBuilder::GetQuickenIndex(uint32_t dex_pc) const { + return quicken_index_for_dex_pc_.Get(dex_pc); +} + +} // namespace art diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h new file mode 100644 index 0000000..42a3f32 --- /dev/null +++ b/compiler/optimizing/block_builder.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_ +#define ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_ + +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" +#include "dex/code_item_accessors.h" +#include "dex/dex_file.h" +#include "nodes.h" + +namespace art { + +class HBasicBlockBuilder : public ValueObject { + public: + HBasicBlockBuilder(HGraph* graph, + const DexFile* const dex_file, + const CodeItemDebugInfoAccessor& accessor, + ScopedArenaAllocator* local_allocator); + + // Creates basic blocks in `graph_` at branch target dex_pc positions of the + // `code_item_`. Blocks are connected but left unpopulated with instructions. + // TryBoundary blocks are inserted at positions where control-flow enters/ + // exits a try block. + bool Build(); + + // Creates basic blocks in `graph_` for compiling an intrinsic. + void BuildIntrinsic(); + + size_t GetNumberOfBranches() const { return number_of_branches_; } + HBasicBlock* GetBlockAt(uint32_t dex_pc) const { return branch_targets_[dex_pc]; } + + size_t GetQuickenIndex(uint32_t dex_pc) const; + + private: + // Creates a basic block starting at given `dex_pc`. + HBasicBlock* MaybeCreateBlockAt(uint32_t dex_pc); + + // Creates a basic block for bytecode instructions at `semantic_dex_pc` and + // stores it under the `store_dex_pc` key. This is used when multiple blocks + // share the same semantic dex_pc, e.g. when building switch decision trees. + HBasicBlock* MaybeCreateBlockAt(uint32_t semantic_dex_pc, uint32_t store_dex_pc); + + bool CreateBranchTargets(); + void ConnectBasicBlocks(); + void InsertTryBoundaryBlocks(); + + // To ensure branches with negative offsets can always OSR jump to compiled + // code, we insert synthesized loops before each block that is the target of a + // negative branch. 
+ void InsertSynthesizedLoopsForOsr(); + + // Helper method which decides whether `catch_block` may have live normal + // predecessors and thus whether a synthetic catch block needs to be created + // to avoid mixing normal and exceptional predecessors. + // Should only be called during InsertTryBoundaryBlocks on blocks at catch + // handler dex_pcs. + bool MightHaveLiveNormalPredecessors(HBasicBlock* catch_block); + + ArenaAllocator* const allocator_; + HGraph* const graph_; + + const DexFile* const dex_file_; + CodeItemDataAccessor code_item_accessor_; // null code item for intrinsic graph. + + ScopedArenaAllocator* const local_allocator_; + ScopedArenaVector branch_targets_; + ScopedArenaVector throwing_blocks_; + size_t number_of_branches_; + + // A table to quickly find the quicken index for the first instruction of a basic block. + ScopedArenaSafeMap quicken_index_for_dex_pc_; + + static constexpr size_t kDefaultNumberOfThrowingBlocks = 2u; + + DISALLOW_COPY_AND_ASSIGN(HBasicBlockBuilder); +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_BLOCK_BUILDER_H_ diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc new file mode 100644 index 0000000..e35d502 --- /dev/null +++ b/compiler/optimizing/bounds_check_elimination.cc @@ -0,0 +1,1977 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "bounds_check_elimination.h" + +#include + +#include "base/scoped_arena_allocator.h" +#include "base/scoped_arena_containers.h" +#include "induction_var_range.h" +#include "nodes.h" +#include "side_effects_analysis.h" + +namespace art { + +class MonotonicValueRange; + +/** + * A value bound is represented as a pair of value and constant, + * e.g. array.length - 1. + */ +class ValueBound : public ValueObject { + public: + ValueBound(HInstruction* instruction, int32_t constant) { + if (instruction != nullptr && instruction->IsIntConstant()) { + // Normalize ValueBound with constant instruction. + int32_t instr_const = instruction->AsIntConstant()->GetValue(); + if (!WouldAddOverflowOrUnderflow(instr_const, constant)) { + instruction_ = nullptr; + constant_ = instr_const + constant; + return; + } + } + instruction_ = instruction; + constant_ = constant; + } + + // Return whether (left + right) overflows or underflows. + static bool WouldAddOverflowOrUnderflow(int32_t left, int32_t right) { + if (right == 0) { + return false; + } + if ((right > 0) && (left <= (std::numeric_limits::max() - right))) { + // No overflow. + return false; + } + if ((right < 0) && (left >= (std::numeric_limits::min() - right))) { + // No underflow. + return false; + } + return true; + } + + // Return true if instruction can be expressed as "left_instruction + right_constant". + static bool IsAddOrSubAConstant(HInstruction* instruction, + /* out */ HInstruction** left_instruction, + /* out */ int32_t* right_constant) { + HInstruction* left_so_far = nullptr; + int32_t right_so_far = 0; + while (instruction->IsAdd() || instruction->IsSub()) { + HBinaryOperation* bin_op = instruction->AsBinaryOperation(); + HInstruction* left = bin_op->GetLeft(); + HInstruction* right = bin_op->GetRight(); + if (right->IsIntConstant()) { + int32_t v = right->AsIntConstant()->GetValue(); + int32_t c = instruction->IsAdd() ? 
v : -v; + if (!WouldAddOverflowOrUnderflow(right_so_far, c)) { + instruction = left; + left_so_far = left; + right_so_far += c; + continue; + } + } + break; + } + // Return result: either false and "null+0" or true and "instr+constant". + *left_instruction = left_so_far; + *right_constant = right_so_far; + return left_so_far != nullptr; + } + + // Expresses any instruction as a value bound. + static ValueBound AsValueBound(HInstruction* instruction) { + if (instruction->IsIntConstant()) { + return ValueBound(nullptr, instruction->AsIntConstant()->GetValue()); + } + HInstruction *left; + int32_t right; + if (IsAddOrSubAConstant(instruction, &left, &right)) { + return ValueBound(left, right); + } + return ValueBound(instruction, 0); + } + + // Try to detect useful value bound format from an instruction, e.g. + // a constant or array length related value. + static ValueBound DetectValueBoundFromValue(HInstruction* instruction, /* out */ bool* found) { + DCHECK(instruction != nullptr); + if (instruction->IsIntConstant()) { + *found = true; + return ValueBound(nullptr, instruction->AsIntConstant()->GetValue()); + } + + if (instruction->IsArrayLength()) { + *found = true; + return ValueBound(instruction, 0); + } + // Try to detect (array.length + c) format. + HInstruction *left; + int32_t right; + if (IsAddOrSubAConstant(instruction, &left, &right)) { + if (left->IsArrayLength()) { + *found = true; + return ValueBound(left, right); + } + } + + // No useful bound detected. + *found = false; + return ValueBound::Max(); + } + + HInstruction* GetInstruction() const { return instruction_; } + int32_t GetConstant() const { return constant_; } + + bool IsRelatedToArrayLength() const { + // Some bounds are created with HNewArray* as the instruction instead + // of HArrayLength*. They are treated the same. 
+ return (instruction_ != nullptr) && + (instruction_->IsArrayLength() || instruction_->IsNewArray()); + } + + bool IsConstant() const { + return instruction_ == nullptr; + } + + static ValueBound Min() { return ValueBound(nullptr, std::numeric_limits::min()); } + static ValueBound Max() { return ValueBound(nullptr, std::numeric_limits::max()); } + + bool Equals(ValueBound bound) const { + return instruction_ == bound.instruction_ && constant_ == bound.constant_; + } + + static bool Equal(HInstruction* instruction1, HInstruction* instruction2) { + if (instruction1 == instruction2) { + return true; + } + if (instruction1 == nullptr || instruction2 == nullptr) { + return false; + } + instruction1 = HuntForDeclaration(instruction1); + instruction2 = HuntForDeclaration(instruction2); + return instruction1 == instruction2; + } + + // Returns if it's certain this->bound >= `bound`. + bool GreaterThanOrEqualTo(ValueBound bound) const { + if (Equal(instruction_, bound.instruction_)) { + return constant_ >= bound.constant_; + } + // Not comparable. Just return false. + return false; + } + + // Returns if it's certain this->bound <= `bound`. + bool LessThanOrEqualTo(ValueBound bound) const { + if (Equal(instruction_, bound.instruction_)) { + return constant_ <= bound.constant_; + } + // Not comparable. Just return false. + return false; + } + + // Returns if it's certain this->bound > `bound`. + bool GreaterThan(ValueBound bound) const { + if (Equal(instruction_, bound.instruction_)) { + return constant_ > bound.constant_; + } + // Not comparable. Just return false. + return false; + } + + // Returns if it's certain this->bound < `bound`. + bool LessThan(ValueBound bound) const { + if (Equal(instruction_, bound.instruction_)) { + return constant_ < bound.constant_; + } + // Not comparable. Just return false. + return false; + } + + // Try to narrow lower bound. Returns the greatest of the two if possible. + // Pick one if they are not comparable. 
+ static ValueBound NarrowLowerBound(ValueBound bound1, ValueBound bound2) { + if (bound1.GreaterThanOrEqualTo(bound2)) { + return bound1; + } + if (bound2.GreaterThanOrEqualTo(bound1)) { + return bound2; + } + + // Not comparable. Just pick one. We may lose some info, but that's ok. + // Favor constant as lower bound. + return bound1.IsConstant() ? bound1 : bound2; + } + + // Try to narrow upper bound. Returns the lowest of the two if possible. + // Pick one if they are not comparable. + static ValueBound NarrowUpperBound(ValueBound bound1, ValueBound bound2) { + if (bound1.LessThanOrEqualTo(bound2)) { + return bound1; + } + if (bound2.LessThanOrEqualTo(bound1)) { + return bound2; + } + + // Not comparable. Just pick one. We may lose some info, but that's ok. + // Favor array length as upper bound. + return bound1.IsRelatedToArrayLength() ? bound1 : bound2; + } + + // Add a constant to a ValueBound. + // `overflow` or `underflow` will return whether the resulting bound may + // overflow or underflow an int. + ValueBound Add(int32_t c, /* out */ bool* overflow, /* out */ bool* underflow) const { + *overflow = *underflow = false; + if (c == 0) { + return *this; + } + + int32_t new_constant; + if (c > 0) { + if (constant_ > (std::numeric_limits::max() - c)) { + *overflow = true; + return Max(); + } + + new_constant = constant_ + c; + // (array.length + non-positive-constant) won't overflow an int. + if (IsConstant() || (IsRelatedToArrayLength() && new_constant <= 0)) { + return ValueBound(instruction_, new_constant); + } + // Be conservative. + *overflow = true; + return Max(); + } else { + if (constant_ < (std::numeric_limits::min() - c)) { + *underflow = true; + return Min(); + } + + new_constant = constant_ + c; + // Regardless of the value new_constant, (array.length+new_constant) will + // never underflow since array.length is no less than 0. 
+ if (IsConstant() || IsRelatedToArrayLength()) { + return ValueBound(instruction_, new_constant); + } + // Be conservative. + *underflow = true; + return Min(); + } + } + + private: + HInstruction* instruction_; + int32_t constant_; +}; + +/** + * Represent a range of lower bound and upper bound, both being inclusive. + * Currently a ValueRange may be generated as a result of the following: + * comparisons related to array bounds, array bounds check, add/sub on top + * of an existing value range, NewArray or a loop phi corresponding to an + * incrementing/decrementing array index (MonotonicValueRange). + */ +class ValueRange : public ArenaObject { + public: + ValueRange(ScopedArenaAllocator* allocator, ValueBound lower, ValueBound upper) + : allocator_(allocator), lower_(lower), upper_(upper) {} + + virtual ~ValueRange() {} + + virtual MonotonicValueRange* AsMonotonicValueRange() { return nullptr; } + bool IsMonotonicValueRange() { + return AsMonotonicValueRange() != nullptr; + } + + ScopedArenaAllocator* GetAllocator() const { return allocator_; } + ValueBound GetLower() const { return lower_; } + ValueBound GetUpper() const { return upper_; } + + bool IsConstantValueRange() const { return lower_.IsConstant() && upper_.IsConstant(); } + + // If it's certain that this value range fits in other_range. + virtual bool FitsIn(ValueRange* other_range) const { + if (other_range == nullptr) { + return true; + } + DCHECK(!other_range->IsMonotonicValueRange()); + return lower_.GreaterThanOrEqualTo(other_range->lower_) && + upper_.LessThanOrEqualTo(other_range->upper_); + } + + // Returns the intersection of this and range. + // If it's not possible to do intersection because some + // bounds are not comparable, it's ok to pick either bound. 
+ virtual ValueRange* Narrow(ValueRange* range) { + if (range == nullptr) { + return this; + } + + if (range->IsMonotonicValueRange()) { + return this; + } + + return new (allocator_) ValueRange( + allocator_, + ValueBound::NarrowLowerBound(lower_, range->lower_), + ValueBound::NarrowUpperBound(upper_, range->upper_)); + } + + // Shift a range by a constant. + ValueRange* Add(int32_t constant) const { + bool overflow, underflow; + ValueBound lower = lower_.Add(constant, &overflow, &underflow); + if (underflow) { + // Lower bound underflow will wrap around to positive values + // and invalidate the upper bound. + return nullptr; + } + ValueBound upper = upper_.Add(constant, &overflow, &underflow); + if (overflow) { + // Upper bound overflow will wrap around to negative values + // and invalidate the lower bound. + return nullptr; + } + return new (allocator_) ValueRange(allocator_, lower, upper); + } + + private: + ScopedArenaAllocator* const allocator_; + const ValueBound lower_; // inclusive + const ValueBound upper_; // inclusive + + DISALLOW_COPY_AND_ASSIGN(ValueRange); +}; + +/** + * A monotonically incrementing/decrementing value range, e.g. + * the variable i in "for (int i=0; iGetBlock()->IsLoopHeader()); + return induction_variable_->GetBlock(); + } + + MonotonicValueRange* AsMonotonicValueRange() override { return this; } + + // If it's certain that this value range fits in other_range. + bool FitsIn(ValueRange* other_range) const override { + if (other_range == nullptr) { + return true; + } + DCHECK(!other_range->IsMonotonicValueRange()); + return false; + } + + // Try to narrow this MonotonicValueRange given another range. + // Ideally it will return a normal ValueRange. But due to + // possible overflow/underflow, that may not be possible. + ValueRange* Narrow(ValueRange* range) override { + if (range == nullptr) { + return this; + } + DCHECK(!range->IsMonotonicValueRange()); + + if (increment_ > 0) { + // Monotonically increasing. 
+ ValueBound lower = ValueBound::NarrowLowerBound(bound_, range->GetLower()); + if (!lower.IsConstant() || lower.GetConstant() == std::numeric_limits::min()) { + // Lower bound isn't useful. Leave it to deoptimization. + return this; + } + + // We currently conservatively assume max array length is Max(). + // If we can make assumptions about the max array length, e.g. due to the max heap size, + // divided by the element size (such as 4 bytes for each integer array), we can + // lower this number and rule out some possible overflows. + int32_t max_array_len = std::numeric_limits::max(); + + // max possible integer value of range's upper value. + int32_t upper = std::numeric_limits::max(); + // Try to lower upper. + ValueBound upper_bound = range->GetUpper(); + if (upper_bound.IsConstant()) { + upper = upper_bound.GetConstant(); + } else if (upper_bound.IsRelatedToArrayLength() && upper_bound.GetConstant() <= 0) { + // Normal case. e.g. <= array.length - 1. + upper = max_array_len + upper_bound.GetConstant(); + } + + // If we can prove for the last number in sequence of initial_, + // initial_ + increment_, initial_ + 2 x increment_, ... + // that's <= upper, (last_num_in_sequence + increment_) doesn't trigger overflow, + // then this MonoticValueRange is narrowed to a normal value range. + + // Be conservative first, assume last number in the sequence hits upper. + int32_t last_num_in_sequence = upper; + if (initial_->IsIntConstant()) { + int32_t initial_constant = initial_->AsIntConstant()->GetValue(); + if (upper <= initial_constant) { + last_num_in_sequence = upper; + } else { + // Cast to int64_t for the substraction part to avoid int32_t overflow. + last_num_in_sequence = initial_constant + + ((int64_t)upper - (int64_t)initial_constant) / increment_ * increment_; + } + } + if (last_num_in_sequence <= (std::numeric_limits::max() - increment_)) { + // No overflow. The sequence will be stopped by the upper bound test as expected. 
+ return new (GetAllocator()) ValueRange(GetAllocator(), lower, range->GetUpper()); + } + + // There might be overflow. Give up narrowing. + return this; + } else { + DCHECK_NE(increment_, 0); + // Monotonically decreasing. + ValueBound upper = ValueBound::NarrowUpperBound(bound_, range->GetUpper()); + if ((!upper.IsConstant() || upper.GetConstant() == std::numeric_limits::max()) && + !upper.IsRelatedToArrayLength()) { + // Upper bound isn't useful. Leave it to deoptimization. + return this; + } + + // Need to take care of underflow. Try to prove underflow won't happen + // for common cases. + if (range->GetLower().IsConstant()) { + int32_t constant = range->GetLower().GetConstant(); + if (constant >= (std::numeric_limits::min() - increment_)) { + return new (GetAllocator()) ValueRange(GetAllocator(), range->GetLower(), upper); + } + } + + // For non-constant lower bound, just assume might be underflow. Give up narrowing. + return this; + } + } + + private: + HPhi* const induction_variable_; // Induction variable for this monotonic value range. + HInstruction* const initial_; // Initial value. + const int32_t increment_; // Increment for each loop iteration. + const ValueBound bound_; // Additional value bound info for initial_. + + DISALLOW_COPY_AND_ASSIGN(MonotonicValueRange); +}; + +class BCEVisitor : public HGraphVisitor { + public: + // The least number of bounds checks that should be eliminated by triggering + // the deoptimization technique. + static constexpr size_t kThresholdForAddingDeoptimize = 2; + + // Very large lengths are considered an anomaly. This is a threshold beyond which we don't + // bother to apply the deoptimization technique since it's likely, or sometimes certain, + // an AIOOBE will be thrown. + static constexpr uint32_t kMaxLengthForAddingDeoptimize = + std::numeric_limits::max() - 1024 * 1024; + + // Added blocks for loop body entry test. 
+ bool IsAddedBlock(HBasicBlock* block) const { + return block->GetBlockId() >= initial_block_size_; + } + + BCEVisitor(HGraph* graph, + const SideEffectsAnalysis& side_effects, + HInductionVarAnalysis* induction_analysis) + : HGraphVisitor(graph), + allocator_(graph->GetArenaStack()), + maps_(graph->GetBlocks().size(), + ScopedArenaSafeMap( + std::less(), + allocator_.Adapter(kArenaAllocBoundsCheckElimination)), + allocator_.Adapter(kArenaAllocBoundsCheckElimination)), + first_index_bounds_check_map_(std::less(), + allocator_.Adapter(kArenaAllocBoundsCheckElimination)), + early_exit_loop_(std::less(), + allocator_.Adapter(kArenaAllocBoundsCheckElimination)), + taken_test_loop_(std::less(), + allocator_.Adapter(kArenaAllocBoundsCheckElimination)), + finite_loop_(allocator_.Adapter(kArenaAllocBoundsCheckElimination)), + has_dom_based_dynamic_bce_(false), + initial_block_size_(graph->GetBlocks().size()), + side_effects_(side_effects), + induction_range_(induction_analysis), + next_(nullptr) {} + + void VisitBasicBlock(HBasicBlock* block) override { + DCHECK(!IsAddedBlock(block)); + first_index_bounds_check_map_.clear(); + // Visit phis and instructions using a safe iterator. The iteration protects + // against deleting the current instruction during iteration. However, it + // must advance next_ if that instruction is deleted during iteration. + for (HInstruction* instruction = block->GetFirstPhi(); instruction != nullptr;) { + DCHECK(instruction->IsInBlock()); + next_ = instruction->GetNext(); + instruction->Accept(this); + instruction = next_; + } + for (HInstruction* instruction = block->GetFirstInstruction(); instruction != nullptr;) { + DCHECK(instruction->IsInBlock()); + next_ = instruction->GetNext(); + instruction->Accept(this); + instruction = next_; + } + // We should never deoptimize from an osr method, otherwise we might wrongly optimize + // code dominated by the deoptimization. 
+ if (!GetGraph()->IsCompilingOsr()) { + AddComparesWithDeoptimization(block); + } + } + + void Finish() { + // Preserve SSA structure which may have been broken by adding one or more + // new taken-test structures (see TransformLoopForDeoptimizationIfNeeded()). + InsertPhiNodes(); + + // Clear the loop data structures. + early_exit_loop_.clear(); + taken_test_loop_.clear(); + finite_loop_.clear(); + } + + private: + // Return the map of proven value ranges at the beginning of a basic block. + ScopedArenaSafeMap* GetValueRangeMap(HBasicBlock* basic_block) { + if (IsAddedBlock(basic_block)) { + // Added blocks don't keep value ranges. + return nullptr; + } + return &maps_[basic_block->GetBlockId()]; + } + + // Traverse up the dominator tree to look for value range info. + ValueRange* LookupValueRange(HInstruction* instruction, HBasicBlock* basic_block) { + while (basic_block != nullptr) { + ScopedArenaSafeMap* map = GetValueRangeMap(basic_block); + if (map != nullptr) { + if (map->find(instruction->GetId()) != map->end()) { + return map->Get(instruction->GetId()); + } + } else { + DCHECK(IsAddedBlock(basic_block)); + } + basic_block = basic_block->GetDominator(); + } + // Didn't find any. + return nullptr; + } + + // Helper method to assign a new range to an instruction in given basic block. + void AssignRange(HBasicBlock* basic_block, HInstruction* instruction, ValueRange* range) { + DCHECK(!range->IsMonotonicValueRange() || instruction->IsLoopHeaderPhi()); + GetValueRangeMap(basic_block)->Overwrite(instruction->GetId(), range); + } + + // Narrow the value range of `instruction` at the end of `basic_block` with `range`, + // and push the narrowed value range to `successor`. 
+ void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block, + HBasicBlock* successor, ValueRange* range) { + ValueRange* existing_range = LookupValueRange(instruction, basic_block); + if (existing_range == nullptr) { + if (range != nullptr) { + AssignRange(successor, instruction, range); + } + return; + } + if (existing_range->IsMonotonicValueRange()) { + DCHECK(instruction->IsLoopHeaderPhi()); + // Make sure the comparison is in the loop header so each increment is + // checked with a comparison. + if (instruction->GetBlock() != basic_block) { + return; + } + } + AssignRange(successor, instruction, existing_range->Narrow(range)); + } + + // Special case that we may simultaneously narrow two MonotonicValueRange's to + // regular value ranges. + void HandleIfBetweenTwoMonotonicValueRanges(HIf* instruction, + HInstruction* left, + HInstruction* right, + IfCondition cond, + MonotonicValueRange* left_range, + MonotonicValueRange* right_range) { + DCHECK(left->IsLoopHeaderPhi()); + DCHECK(right->IsLoopHeaderPhi()); + if (instruction->GetBlock() != left->GetBlock()) { + // Comparison needs to be in loop header to make sure it's done after each + // increment/decrement. + return; + } + + // Handle common cases which also don't have overflow/underflow concerns. 
+ if (left_range->GetIncrement() == 1 && + left_range->GetBound().IsConstant() && + right_range->GetIncrement() == -1 && + right_range->GetBound().IsRelatedToArrayLength() && + right_range->GetBound().GetConstant() < 0) { + HBasicBlock* successor = nullptr; + int32_t left_compensation = 0; + int32_t right_compensation = 0; + if (cond == kCondLT) { + left_compensation = -1; + right_compensation = 1; + successor = instruction->IfTrueSuccessor(); + } else if (cond == kCondLE) { + successor = instruction->IfTrueSuccessor(); + } else if (cond == kCondGT) { + successor = instruction->IfFalseSuccessor(); + } else if (cond == kCondGE) { + left_compensation = -1; + right_compensation = 1; + successor = instruction->IfFalseSuccessor(); + } else { + // We don't handle '=='/'!=' test in case left and right can cross and + // miss each other. + return; + } + + if (successor != nullptr) { + bool overflow; + bool underflow; + ValueRange* new_left_range = new (&allocator_) ValueRange( + &allocator_, + left_range->GetBound(), + right_range->GetBound().Add(left_compensation, &overflow, &underflow)); + if (!overflow && !underflow) { + ApplyRangeFromComparison(left, instruction->GetBlock(), successor, + new_left_range); + } + + ValueRange* new_right_range = new (&allocator_) ValueRange( + &allocator_, + left_range->GetBound().Add(right_compensation, &overflow, &underflow), + right_range->GetBound()); + if (!overflow && !underflow) { + ApplyRangeFromComparison(right, instruction->GetBlock(), successor, + new_right_range); + } + } + } + } + + // Handle "if (left cmp_cond right)". + void HandleIf(HIf* instruction, HInstruction* left, HInstruction* right, IfCondition cond) { + HBasicBlock* block = instruction->GetBlock(); + + HBasicBlock* true_successor = instruction->IfTrueSuccessor(); + // There should be no critical edge at this point. 
+ DCHECK_EQ(true_successor->GetPredecessors().size(), 1u); + + HBasicBlock* false_successor = instruction->IfFalseSuccessor(); + // There should be no critical edge at this point. + DCHECK_EQ(false_successor->GetPredecessors().size(), 1u); + + ValueRange* left_range = LookupValueRange(left, block); + MonotonicValueRange* left_monotonic_range = nullptr; + if (left_range != nullptr) { + left_monotonic_range = left_range->AsMonotonicValueRange(); + if (left_monotonic_range != nullptr) { + HBasicBlock* loop_head = left_monotonic_range->GetLoopHeader(); + if (instruction->GetBlock() != loop_head) { + // For monotonic value range, don't handle `instruction` + // if it's not defined in the loop header. + return; + } + } + } + + bool found; + ValueBound bound = ValueBound::DetectValueBoundFromValue(right, &found); + // Each comparison can establish a lower bound and an upper bound + // for the left hand side. + ValueBound lower = bound; + ValueBound upper = bound; + if (!found) { + // No constant or array.length+c format bound found. + // For iIsMonotonicValueRange()) { + if (left_range != nullptr && left_range->IsMonotonicValueRange()) { + HandleIfBetweenTwoMonotonicValueRanges(instruction, left, right, cond, + left_range->AsMonotonicValueRange(), + right_range->AsMonotonicValueRange()); + return; + } + } + lower = right_range->GetLower(); + upper = right_range->GetUpper(); + } else { + lower = ValueBound::Min(); + upper = ValueBound::Max(); + } + } + + bool overflow, underflow; + if (cond == kCondLT || cond == kCondLE) { + if (!upper.Equals(ValueBound::Max())) { + int32_t compensation = (cond == kCondLT) ? 
-1 : 0; // upper bound is inclusive + ValueBound new_upper = upper.Add(compensation, &overflow, &underflow); + if (overflow || underflow) { + return; + } + ValueRange* new_range = new (&allocator_) ValueRange( + &allocator_, ValueBound::Min(), new_upper); + ApplyRangeFromComparison(left, block, true_successor, new_range); + } + + // array.length as a lower bound isn't considered useful. + if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) { + int32_t compensation = (cond == kCondLE) ? 1 : 0; // lower bound is inclusive + ValueBound new_lower = lower.Add(compensation, &overflow, &underflow); + if (overflow || underflow) { + return; + } + ValueRange* new_range = new (&allocator_) ValueRange( + &allocator_, new_lower, ValueBound::Max()); + ApplyRangeFromComparison(left, block, false_successor, new_range); + } + } else if (cond == kCondGT || cond == kCondGE) { + // array.length as a lower bound isn't considered useful. + if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) { + int32_t compensation = (cond == kCondGT) ? 1 : 0; // lower bound is inclusive + ValueBound new_lower = lower.Add(compensation, &overflow, &underflow); + if (overflow || underflow) { + return; + } + ValueRange* new_range = new (&allocator_) ValueRange( + &allocator_, new_lower, ValueBound::Max()); + ApplyRangeFromComparison(left, block, true_successor, new_range); + } + + if (!upper.Equals(ValueBound::Max())) { + int32_t compensation = (cond == kCondGE) ? 
-1 : 0; // upper bound is inclusive + ValueBound new_upper = upper.Add(compensation, &overflow, &underflow); + if (overflow || underflow) { + return; + } + ValueRange* new_range = new (&allocator_) ValueRange( + &allocator_, ValueBound::Min(), new_upper); + ApplyRangeFromComparison(left, block, false_successor, new_range); + } + } else if (cond == kCondNE || cond == kCondEQ) { + if (left->IsArrayLength()) { + if (lower.IsConstant() && upper.IsConstant()) { + // Special case: + // length == [c,d] yields [c, d] along true + // length != [c,d] yields [c, d] along false + if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) { + ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper); + ApplyRangeFromComparison( + left, block, cond == kCondEQ ? true_successor : false_successor, new_range); + } + // In addition: + // length == 0 yields [1, max] along false + // length != 0 yields [1, max] along true + if (lower.GetConstant() == 0 && upper.GetConstant() == 0) { + ValueRange* new_range = new (&allocator_) ValueRange( + &allocator_, ValueBound(nullptr, 1), ValueBound::Max()); + ApplyRangeFromComparison( + left, block, cond == kCondEQ ? false_successor : true_successor, new_range); + } + } + } else if (lower.IsRelatedToArrayLength() && lower.Equals(upper)) { + // Special aliasing case, with x not array length itself: + // x == [length,length] yields x == length along true + // x != [length,length] yields x == length along false + ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper); + ApplyRangeFromComparison( + left, block, cond == kCondEQ ? 
true_successor : false_successor, new_range); + } + } + } + + void VisitBoundsCheck(HBoundsCheck* bounds_check) override { + HBasicBlock* block = bounds_check->GetBlock(); + HInstruction* index = bounds_check->InputAt(0); + HInstruction* array_length = bounds_check->InputAt(1); + DCHECK(array_length->IsIntConstant() || + array_length->IsArrayLength() || + array_length->IsPhi()); + bool try_dynamic_bce = true; + // Analyze index range. + if (!index->IsIntConstant()) { + // Non-constant index. + ValueBound lower = ValueBound(nullptr, 0); // constant 0 + ValueBound upper = ValueBound(array_length, -1); // array_length - 1 + ValueRange array_range(&allocator_, lower, upper); + // Try index range obtained by dominator-based analysis. + ValueRange* index_range = LookupValueRange(index, block); + if (index_range != nullptr) { + if (index_range->FitsIn(&array_range)) { + ReplaceInstruction(bounds_check, index); + return; + } else if (index_range->IsConstantValueRange()) { + // If the non-constant index turns out to have a constant range, + // make one more attempt to get a constant in the array range. + ValueRange* existing_range = LookupValueRange(array_length, block); + if (existing_range != nullptr && + existing_range->IsConstantValueRange() && + existing_range->GetLower().GetConstant() > 0) { + ValueBound constant_upper(nullptr, existing_range->GetLower().GetConstant() - 1); + ValueRange constant_array_range(&allocator_, lower, constant_upper); + if (index_range->FitsIn(&constant_array_range)) { + ReplaceInstruction(bounds_check, index); + return; + } + } + } + } + // Try index range obtained by induction variable analysis. + // Disables dynamic bce if OOB is certain. + if (InductionRangeFitsIn(&array_range, bounds_check, &try_dynamic_bce)) { + ReplaceInstruction(bounds_check, index); + return; + } + } else { + // Constant index. + int32_t constant = index->AsIntConstant()->GetValue(); + if (constant < 0) { + // Will always throw exception. 
+ return; + } else if (array_length->IsIntConstant()) { + if (constant < array_length->AsIntConstant()->GetValue()) { + ReplaceInstruction(bounds_check, index); + } + return; + } + // Analyze array length range. + DCHECK(array_length->IsArrayLength()); + ValueRange* existing_range = LookupValueRange(array_length, block); + if (existing_range != nullptr) { + ValueBound lower = existing_range->GetLower(); + DCHECK(lower.IsConstant()); + if (constant < lower.GetConstant()) { + ReplaceInstruction(bounds_check, index); + return; + } else { + // Existing range isn't strong enough to eliminate the bounds check. + // Fall through to update the array_length range with info from this + // bounds check. + } + } + // Once we have an array access like 'array[5] = 1', we record array.length >= 6. + // We currently don't do it for non-constant index since a valid array[i] can't prove + // a valid array[i-1] yet due to the lower bound side. + if (constant == std::numeric_limits::max()) { + // Max() as an index will definitely throw AIOOBE. + return; + } else { + ValueBound lower = ValueBound(nullptr, constant + 1); + ValueBound upper = ValueBound::Max(); + ValueRange* range = new (&allocator_) ValueRange(&allocator_, lower, upper); + AssignRange(block, array_length, range); + } + } + + // If static analysis fails, and OOB is not certain, try dynamic elimination. + if (try_dynamic_bce) { + // Try loop-based dynamic elimination. + HLoopInformation* loop = bounds_check->GetBlock()->GetLoopInformation(); + bool needs_finite_test = false; + bool needs_taken_test = false; + if (DynamicBCESeemsProfitable(loop, bounds_check->GetBlock()) && + induction_range_.CanGenerateRange( + bounds_check, index, &needs_finite_test, &needs_taken_test) && + CanHandleInfiniteLoop(loop, index, needs_finite_test) && + // Do this test last, since it may generate code. 
+ CanHandleLength(loop, array_length, needs_taken_test)) { + TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test); + TransformLoopForDynamicBCE(loop, bounds_check); + return; + } + // Otherwise, prepare dominator-based dynamic elimination. + if (first_index_bounds_check_map_.find(array_length->GetId()) == + first_index_bounds_check_map_.end()) { + // Remember the first bounds check against each array_length. That bounds check + // instruction has an associated HEnvironment where we may add an HDeoptimize + // to eliminate subsequent bounds checks against the same array_length. + first_index_bounds_check_map_.Put(array_length->GetId(), bounds_check); + } + } + } + + static bool HasSameInputAtBackEdges(HPhi* phi) { + DCHECK(phi->IsLoopHeaderPhi()); + HConstInputsRef inputs = phi->GetInputs(); + // Start with input 1. Input 0 is from the incoming block. + const HInstruction* input1 = inputs[1]; + DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge( + *phi->GetBlock()->GetPredecessors()[1])); + for (size_t i = 2; i < inputs.size(); ++i) { + DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge( + *phi->GetBlock()->GetPredecessors()[i])); + if (input1 != inputs[i]) { + return false; + } + } + return true; + } + + void VisitPhi(HPhi* phi) override { + if (phi->IsLoopHeaderPhi() + && (phi->GetType() == DataType::Type::kInt32) + && HasSameInputAtBackEdges(phi)) { + HInstruction* instruction = phi->InputAt(1); + HInstruction *left; + int32_t increment; + if (ValueBound::IsAddOrSubAConstant(instruction, &left, &increment)) { + if (left == phi) { + HInstruction* initial_value = phi->InputAt(0); + ValueRange* range = nullptr; + if (increment == 0) { + // Add constant 0. It's really a fixed value. + range = new (&allocator_) ValueRange( + &allocator_, + ValueBound(initial_value, 0), + ValueBound(initial_value, 0)); + } else { + // Monotonically increasing/decreasing. 
+ bool found; + ValueBound bound = ValueBound::DetectValueBoundFromValue( + initial_value, &found); + if (!found) { + // No constant or array.length+c bound found. + // For i=j, we can still use j's upper bound as i's upper bound. + // Same for lower. + ValueRange* initial_range = LookupValueRange(initial_value, phi->GetBlock()); + if (initial_range != nullptr) { + bound = increment > 0 ? initial_range->GetLower() : + initial_range->GetUpper(); + } else { + bound = increment > 0 ? ValueBound::Min() : ValueBound::Max(); + } + } + range = new (&allocator_) MonotonicValueRange( + &allocator_, + phi, + initial_value, + increment, + bound); + } + AssignRange(phi->GetBlock(), phi, range); + } + } + } + } + + void VisitIf(HIf* instruction) override { + if (instruction->InputAt(0)->IsCondition()) { + HCondition* cond = instruction->InputAt(0)->AsCondition(); + HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition()); + } + } + + void VisitAdd(HAdd* add) override { + HInstruction* right = add->GetRight(); + if (right->IsIntConstant()) { + ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock()); + if (left_range == nullptr) { + return; + } + ValueRange* range = left_range->Add(right->AsIntConstant()->GetValue()); + if (range != nullptr) { + AssignRange(add->GetBlock(), add, range); + } + } + } + + void VisitSub(HSub* sub) override { + HInstruction* left = sub->GetLeft(); + HInstruction* right = sub->GetRight(); + if (right->IsIntConstant()) { + ValueRange* left_range = LookupValueRange(left, sub->GetBlock()); + if (left_range == nullptr) { + return; + } + ValueRange* range = left_range->Add(-right->AsIntConstant()->GetValue()); + if (range != nullptr) { + AssignRange(sub->GetBlock(), sub, range); + return; + } + } + + // Here we are interested in the typical triangular case of nested loops, + // such as the inner loop 'for (int j=0; jIsArrayLength()) { + HInstruction* array_length = left->AsArrayLength(); + ValueRange* right_range = 
LookupValueRange(right, sub->GetBlock()); + if (right_range != nullptr) { + ValueBound lower = right_range->GetLower(); + ValueBound upper = right_range->GetUpper(); + if (lower.IsConstant() && upper.IsRelatedToArrayLength()) { + HInstruction* upper_inst = upper.GetInstruction(); + // Make sure it's the same array. + if (ValueBound::Equal(array_length, upper_inst)) { + int32_t c0 = right_const; + int32_t c1 = lower.GetConstant(); + int32_t c2 = upper.GetConstant(); + // (array.length + c0 - v) where v is in [c1, array.length + c2] + // gets [c0 - c2, array.length + c0 - c1] as its value range. + if (!ValueBound::WouldAddOverflowOrUnderflow(c0, -c2) && + !ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) { + if ((c0 - c1) <= 0) { + // array.length + (c0 - c1) won't overflow/underflow. + ValueRange* range = new (&allocator_) ValueRange( + &allocator_, + ValueBound(nullptr, right_const - upper.GetConstant()), + ValueBound(array_length, right_const - lower.GetConstant())); + AssignRange(sub->GetBlock(), sub, range); + } + } + } + } + } + } + } + + void FindAndHandlePartialArrayLength(HBinaryOperation* instruction) { + DCHECK(instruction->IsDiv() || instruction->IsShr() || instruction->IsUShr()); + HInstruction* right = instruction->GetRight(); + int32_t right_const; + if (right->IsIntConstant()) { + right_const = right->AsIntConstant()->GetValue(); + // Detect division by two or more. + if ((instruction->IsDiv() && right_const <= 1) || + (instruction->IsShr() && right_const < 1) || + (instruction->IsUShr() && right_const < 1)) { + return; + } + } else { + return; + } + + // Try to handle array.length/2 or (array.length-1)/2 format. + HInstruction* left = instruction->GetLeft(); + HInstruction* left_of_left; // left input of left. + int32_t c = 0; + if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &c)) { + left = left_of_left; + } + // The value of left input of instruction equals (left + c). 
+ + // (array_length + 1) or smaller divided by two or more + // always generate a value in [Min(), array_length]. + // This is true even if array_length is Max(). + if (left->IsArrayLength() && c <= 1) { + if (instruction->IsUShr() && c < 0) { + // Make sure for unsigned shift, left side is not negative. + // e.g. if array_length is 2, ((array_length - 3) >>> 2) is way bigger + // than array_length. + return; + } + ValueRange* range = new (&allocator_) ValueRange( + &allocator_, + ValueBound(nullptr, std::numeric_limits::min()), + ValueBound(left, 0)); + AssignRange(instruction->GetBlock(), instruction, range); + } + } + + void VisitDiv(HDiv* div) override { + FindAndHandlePartialArrayLength(div); + } + + void VisitShr(HShr* shr) override { + FindAndHandlePartialArrayLength(shr); + } + + void VisitUShr(HUShr* ushr) override { + FindAndHandlePartialArrayLength(ushr); + } + + void VisitAnd(HAnd* instruction) override { + if (instruction->GetRight()->IsIntConstant()) { + int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue(); + if (constant > 0) { + // constant serves as a mask so any number masked with it + // gets a [0, constant] value range. + ValueRange* range = new (&allocator_) ValueRange( + &allocator_, + ValueBound(nullptr, 0), + ValueBound(nullptr, constant)); + AssignRange(instruction->GetBlock(), instruction, range); + } + } + } + + void VisitRem(HRem* instruction) override { + HInstruction* left = instruction->GetLeft(); + HInstruction* right = instruction->GetRight(); + + // Handle 'i % CONST' format expression in array index, e.g: + // array[i % 20]; + if (right->IsIntConstant()) { + int32_t right_const = std::abs(right->AsIntConstant()->GetValue()); + if (right_const == 0) { + return; + } + // The sign of divisor CONST doesn't affect the sign final value range. 
+ // For example: + // if (i > 0) { + // array[i % 10]; // index value range [0, 9] + // array[i % -10]; // index value range [0, 9] + // } + ValueRange* right_range = new (&allocator_) ValueRange( + &allocator_, + ValueBound(nullptr, 1 - right_const), + ValueBound(nullptr, right_const - 1)); + + ValueRange* left_range = LookupValueRange(left, instruction->GetBlock()); + if (left_range != nullptr) { + right_range = right_range->Narrow(left_range); + } + AssignRange(instruction->GetBlock(), instruction, right_range); + return; + } + + // Handle following pattern: + // i0 NullCheck + // i1 ArrayLength[i0] + // i2 DivByZeroCheck [i1] <-- right + // i3 Rem [i5, i2] <-- we are here. + // i4 BoundsCheck [i3,i1] + if (right->IsDivZeroCheck()) { + // if array_length can pass div-by-zero check, + // array_length must be > 0. + right = right->AsDivZeroCheck()->InputAt(0); + } + + // Handle 'i % array.length' format expression in array index, e.g: + // array[(i+7) % array.length]; + if (right->IsArrayLength()) { + ValueBound lower = ValueBound::Min(); // ideally, lower should be '1-array_length'. + ValueBound upper = ValueBound(right, -1); // array_length - 1 + ValueRange* right_range = new (&allocator_) ValueRange( + &allocator_, + lower, + upper); + ValueRange* left_range = LookupValueRange(left, instruction->GetBlock()); + if (left_range != nullptr) { + right_range = right_range->Narrow(left_range); + } + AssignRange(instruction->GetBlock(), instruction, right_range); + return; + } + } + + void VisitNewArray(HNewArray* new_array) override { + HInstruction* len = new_array->GetLength(); + if (!len->IsIntConstant()) { + HInstruction *left; + int32_t right_const; + if (ValueBound::IsAddOrSubAConstant(len, &left, &right_const)) { + // (left + right_const) is used as size to new the array. 
+ // We record "-right_const <= left <= new_array - right_const"; + ValueBound lower = ValueBound(nullptr, -right_const); + // We use new_array for the bound instead of new_array.length, + // which isn't available as an instruction yet. new_array will + // be treated the same as new_array.length when it's used in a ValueBound. + ValueBound upper = ValueBound(new_array, -right_const); + ValueRange* range = new (&allocator_) ValueRange(&allocator_, lower, upper); + ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock()); + if (existing_range != nullptr) { + range = existing_range->Narrow(range); + } + AssignRange(new_array->GetBlock(), left, range); + } + } + } + + /** + * After null/bounds checks are eliminated, some invariant array references + * may be exposed underneath which can be hoisted out of the loop to the + * preheader or, in combination with dynamic bce, the deoptimization block. + * + * for (int i = 0; i < n; i++) { + * <-------+ + * for (int j = 0; j < n; j++) | + * a[i][j] = 0; --a[i]--+ + * } + * + * Note: this optimization is no longer applied after dominator-based dynamic deoptimization + * has occurred (see AddCompareWithDeoptimization()), since in those cases it would be + * unsafe to hoist array references across their deoptimization instruction inside a loop. + */ + void VisitArrayGet(HArrayGet* array_get) override { + if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) { + HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation(); + if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) && + loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) { + SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader()); + if (!array_get->GetSideEffects().MayDependOn(loop_effects)) { + // We can hoist ArrayGet only if its execution is guaranteed on every iteration. + // In other words only if array_get_bb dominates all back branches. 
+ if (loop->DominatesAllBackEdges(array_get->GetBlock())) { + HoistToPreHeaderOrDeoptBlock(loop, array_get); + } + } + } + } + } + + /** Performs dominator-based dynamic elimination on suitable set of bounds checks. */ + void AddCompareWithDeoptimization(HBasicBlock* block, + HInstruction* array_length, + HInstruction* base, + int32_t min_c, int32_t max_c) { + HBoundsCheck* bounds_check = + first_index_bounds_check_map_.Get(array_length->GetId())->AsBoundsCheck(); + // Construct deoptimization on single or double bounds on range [base-min_c,base+max_c], + // for example either for a[0]..a[3] just 3 or for a[base-1]..a[base+3] both base-1 + // and base+3, since we made the assumption any in between value may occur too. + // In code, using unsigned comparisons: + // (1) constants only + // if (max_c >= a.length) deoptimize; + // (2) general case + // if (base-min_c > base+max_c) deoptimize; + // if (base+max_c >= a.length ) deoptimize; + static_assert(kMaxLengthForAddingDeoptimize < std::numeric_limits::max(), + "Incorrect max length may be subject to arithmetic wrap-around"); + HInstruction* upper = GetGraph()->GetIntConstant(max_c); + if (base == nullptr) { + DCHECK_GE(min_c, 0); + } else { + HInstruction* lower = new (GetGraph()->GetAllocator()) + HAdd(DataType::Type::kInt32, base, GetGraph()->GetIntConstant(min_c)); + upper = new (GetGraph()->GetAllocator()) HAdd(DataType::Type::kInt32, base, upper); + block->InsertInstructionBefore(lower, bounds_check); + block->InsertInstructionBefore(upper, bounds_check); + InsertDeoptInBlock(bounds_check, new (GetGraph()->GetAllocator()) HAbove(lower, upper)); + } + InsertDeoptInBlock( + bounds_check, new (GetGraph()->GetAllocator()) HAboveOrEqual(upper, array_length)); + // Flag that this kind of deoptimization has occurred. + has_dom_based_dynamic_bce_ = true; + } + + /** Attempts dominator-based dynamic elimination on remaining candidates. 
*/ + void AddComparesWithDeoptimization(HBasicBlock* block) { + for (const auto& entry : first_index_bounds_check_map_) { + HBoundsCheck* bounds_check = entry.second; + HInstruction* index = bounds_check->InputAt(0); + HInstruction* array_length = bounds_check->InputAt(1); + if (!array_length->IsArrayLength()) { + continue; // disregard phis and constants + } + // Collect all bounds checks that are still there and that are related as "a[base + constant]" + // for a base instruction (possibly absent) and various constants. Note that no attempt + // is made to partition the set into matching subsets (viz. a[0], a[1] and a[base+1] and + // a[base+2] are considered as one set). + // TODO: would such a partitioning be worthwhile? + ValueBound value = ValueBound::AsValueBound(index); + HInstruction* base = value.GetInstruction(); + int32_t min_c = base == nullptr ? 0 : value.GetConstant(); + int32_t max_c = value.GetConstant(); + ScopedArenaVector candidates( + allocator_.Adapter(kArenaAllocBoundsCheckElimination)); + ScopedArenaVector standby( + allocator_.Adapter(kArenaAllocBoundsCheckElimination)); + for (const HUseListNode& use : array_length->GetUses()) { + // Another bounds check in same or dominated block? + HInstruction* user = use.GetUser(); + HBasicBlock* other_block = user->GetBlock(); + if (user->IsBoundsCheck() && block->Dominates(other_block)) { + HBoundsCheck* other_bounds_check = user->AsBoundsCheck(); + HInstruction* other_index = other_bounds_check->InputAt(0); + HInstruction* other_array_length = other_bounds_check->InputAt(1); + ValueBound other_value = ValueBound::AsValueBound(other_index); + if (array_length == other_array_length && base == other_value.GetInstruction()) { + // Reject certain OOB if BoundsCheck(l, l) occurs on considered subset. 
+ if (array_length == other_index) { + candidates.clear(); + standby.clear(); + break; + } + // Since a subsequent dominated block could be under a conditional, only accept + // the other bounds check if it is in same block or both blocks dominate the exit. + // TODO: we could improve this by testing proper post-dominance, or even if this + // constant is seen along *all* conditional paths that follow. + HBasicBlock* exit = GetGraph()->GetExitBlock(); + if (block == user->GetBlock() || + (block->Dominates(exit) && other_block->Dominates(exit))) { + int32_t other_c = other_value.GetConstant(); + min_c = std::min(min_c, other_c); + max_c = std::max(max_c, other_c); + candidates.push_back(other_bounds_check); + } else { + // Add this candidate later only if it falls into the range. + standby.push_back(other_bounds_check); + } + } + } + } + // Add standby candidates that fall in selected range. + for (HBoundsCheck* other_bounds_check : standby) { + HInstruction* other_index = other_bounds_check->InputAt(0); + int32_t other_c = ValueBound::AsValueBound(other_index).GetConstant(); + if (min_c <= other_c && other_c <= max_c) { + candidates.push_back(other_bounds_check); + } + } + // Perform dominator-based deoptimization if it seems profitable, where we eliminate + // bounds checks and replace these with deopt checks that guard against any possible + // OOB. Note that we reject cases where the distance min_c:max_c range gets close to + // the maximum possible array length, since those cases are likely to always deopt + // (such situations do not necessarily go OOB, though, since the array could be really + // large, or the programmer could rely on arithmetic wrap-around from max to min). + size_t threshold = kThresholdForAddingDeoptimize + (base == nullptr ? 0 : 1); // extra test? 
+ uint32_t distance = static_cast(max_c) - static_cast(min_c); + if (candidates.size() >= threshold && + (base != nullptr || min_c >= 0) && // reject certain OOB + distance <= kMaxLengthForAddingDeoptimize) { // reject likely/certain deopt + AddCompareWithDeoptimization(block, array_length, base, min_c, max_c); + for (HBoundsCheck* other_bounds_check : candidates) { + // Only replace if still in the graph. This avoids visiting the same + // bounds check twice if it occurred multiple times in the use list. + if (other_bounds_check->IsInBlock()) { + ReplaceInstruction(other_bounds_check, other_bounds_check->InputAt(0)); + } + } + } + } + } + + /** + * Returns true if static range analysis based on induction variables can determine the bounds + * check on the given array range is always satisfied with the computed index range. The output + * parameter try_dynamic_bce is set to false if OOB is certain. + */ + bool InductionRangeFitsIn(ValueRange* array_range, + HBoundsCheck* context, + bool* try_dynamic_bce) { + InductionVarRange::Value v1; + InductionVarRange::Value v2; + bool needs_finite_test = false; + HInstruction* index = context->InputAt(0); + HInstruction* hint = HuntForDeclaration(context->InputAt(1)); + if (induction_range_.GetInductionRange(context, index, hint, &v1, &v2, &needs_finite_test)) { + if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) && + v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) { + DCHECK(v1.a_constant == 1 || v1.instruction == nullptr); + DCHECK(v2.a_constant == 1 || v2.instruction == nullptr); + ValueRange index_range(&allocator_, + ValueBound(v1.instruction, v1.b_constant), + ValueBound(v2.instruction, v2.b_constant)); + // If analysis reveals a certain OOB, disable dynamic BCE. Otherwise, + // use analysis for static bce only if loop is finite. 
+ if (index_range.GetLower().LessThan(array_range->GetLower()) || + index_range.GetUpper().GreaterThan(array_range->GetUpper())) { + *try_dynamic_bce = false; + } else if (!needs_finite_test && index_range.FitsIn(array_range)) { + return true; + } + } + } + return false; + } + + /** + * Performs loop-based dynamic elimination on a bounds check. In order to minimize the + * number of eventually generated tests, related bounds checks with tests that can be + * combined with tests for the given bounds check are collected first. + */ + void TransformLoopForDynamicBCE(HLoopInformation* loop, HBoundsCheck* bounds_check) { + HInstruction* index = bounds_check->InputAt(0); + HInstruction* array_length = bounds_check->InputAt(1); + DCHECK(loop->IsDefinedOutOfTheLoop(array_length)); // pre-checked + DCHECK(loop->DominatesAllBackEdges(bounds_check->GetBlock())); + // Collect all bounds checks in the same loop that are related as "a[base + constant]" + // for a base instruction (possibly absent) and various constants. + ValueBound value = ValueBound::AsValueBound(index); + HInstruction* base = value.GetInstruction(); + int32_t min_c = base == nullptr ? 
0 : value.GetConstant(); + int32_t max_c = value.GetConstant(); + ScopedArenaVector candidates( + allocator_.Adapter(kArenaAllocBoundsCheckElimination)); + ScopedArenaVector standby( + allocator_.Adapter(kArenaAllocBoundsCheckElimination)); + for (const HUseListNode& use : array_length->GetUses()) { + HInstruction* user = use.GetUser(); + if (user->IsBoundsCheck() && loop == user->GetBlock()->GetLoopInformation()) { + HBoundsCheck* other_bounds_check = user->AsBoundsCheck(); + HInstruction* other_index = other_bounds_check->InputAt(0); + HInstruction* other_array_length = other_bounds_check->InputAt(1); + ValueBound other_value = ValueBound::AsValueBound(other_index); + int32_t other_c = other_value.GetConstant(); + if (array_length == other_array_length && base == other_value.GetInstruction()) { + // Ensure every candidate could be picked for code generation. + bool b1 = false, b2 = false; + if (!induction_range_.CanGenerateRange(other_bounds_check, other_index, &b1, &b2)) { + continue; + } + // Does the current basic block dominate all back edges? If not, + // add this candidate later only if it falls into the range. + if (!loop->DominatesAllBackEdges(user->GetBlock())) { + standby.push_back(other_bounds_check); + continue; + } + min_c = std::min(min_c, other_c); + max_c = std::max(max_c, other_c); + candidates.push_back(other_bounds_check); + } + } + } + // Add standby candidates that fall in selected range. + for (HBoundsCheck* other_bounds_check : standby) { + HInstruction* other_index = other_bounds_check->InputAt(0); + int32_t other_c = ValueBound::AsValueBound(other_index).GetConstant(); + if (min_c <= other_c && other_c <= max_c) { + candidates.push_back(other_bounds_check); + } + } + // Perform loop-based deoptimization if it seems profitable, where we eliminate bounds + // checks and replace these with deopt checks that guard against any possible OOB. 
+ DCHECK_LT(0u, candidates.size()); + uint32_t distance = static_cast(max_c) - static_cast(min_c); + if ((base != nullptr || min_c >= 0) && // reject certain OOB + distance <= kMaxLengthForAddingDeoptimize) { // reject likely/certain deopt + HBasicBlock* block = GetPreHeader(loop, bounds_check); + HInstruction* min_lower = nullptr; + HInstruction* min_upper = nullptr; + HInstruction* max_lower = nullptr; + HInstruction* max_upper = nullptr; + // Iterate over all bounds checks. + for (HBoundsCheck* other_bounds_check : candidates) { + // Only handle if still in the graph. This avoids visiting the same + // bounds check twice if it occurred multiple times in the use list. + if (other_bounds_check->IsInBlock()) { + HInstruction* other_index = other_bounds_check->InputAt(0); + int32_t other_c = ValueBound::AsValueBound(other_index).GetConstant(); + // Generate code for either the maximum or minimum. Range analysis already was queried + // whether code generation on the original and, thus, related bounds check was possible. + // It handles either loop invariants (lower is not set) or unit strides. 
+ if (other_c == max_c) { + induction_range_.GenerateRange( + other_bounds_check, other_index, GetGraph(), block, &max_lower, &max_upper); + } else if (other_c == min_c && base != nullptr) { + induction_range_.GenerateRange( + other_bounds_check, other_index, GetGraph(), block, &min_lower, &min_upper); + } + ReplaceInstruction(other_bounds_check, other_index); + } + } + // In code, using unsigned comparisons: + // (1) constants only + // if (max_upper >= a.length ) deoptimize; + // (2) two symbolic invariants + // if (min_upper > max_upper) deoptimize; unless min_c == max_c + // if (max_upper >= a.length ) deoptimize; + // (3) general case, unit strides (where lower would exceed upper for arithmetic wrap-around) + // if (min_lower > max_lower) deoptimize; unless min_c == max_c + // if (max_lower > max_upper) deoptimize; + // if (max_upper >= a.length ) deoptimize; + if (base == nullptr) { + // Constants only. + DCHECK_GE(min_c, 0); + DCHECK(min_lower == nullptr && min_upper == nullptr && + max_lower == nullptr && max_upper != nullptr); + } else if (max_lower == nullptr) { + // Two symbolic invariants. + if (min_c != max_c) { + DCHECK(min_lower == nullptr && min_upper != nullptr && + max_lower == nullptr && max_upper != nullptr); + InsertDeoptInLoop( + loop, block, new (GetGraph()->GetAllocator()) HAbove(min_upper, max_upper)); + } else { + DCHECK(min_lower == nullptr && min_upper == nullptr && + max_lower == nullptr && max_upper != nullptr); + } + } else { + // General case, unit strides. 
+ if (min_c != max_c) { + DCHECK(min_lower != nullptr && min_upper != nullptr && + max_lower != nullptr && max_upper != nullptr); + InsertDeoptInLoop( + loop, block, new (GetGraph()->GetAllocator()) HAbove(min_lower, max_lower)); + } else { + DCHECK(min_lower == nullptr && min_upper == nullptr && + max_lower != nullptr && max_upper != nullptr); + } + InsertDeoptInLoop( + loop, block, new (GetGraph()->GetAllocator()) HAbove(max_lower, max_upper)); + } + InsertDeoptInLoop( + loop, block, new (GetGraph()->GetAllocator()) HAboveOrEqual(max_upper, array_length)); + } else { + // TODO: if rejected, avoid doing this again for subsequent instructions in this set? + } + } + + /** + * Returns true if heuristics indicate that dynamic bce may be profitable. + */ + bool DynamicBCESeemsProfitable(HLoopInformation* loop, HBasicBlock* block) { + if (loop != nullptr) { + // The loop preheader of an irreducible loop does not dominate all the blocks in + // the loop. We would need to find the common dominator of all blocks in the loop. + if (loop->IsIrreducible()) { + return false; + } + // We should never deoptimize from an osr method, otherwise we might wrongly optimize + // code dominated by the deoptimization. + if (GetGraph()->IsCompilingOsr()) { + return false; + } + // A try boundary preheader is hard to handle. + // TODO: remove this restriction. + if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) { + return false; + } + // Does loop have early-exits? If so, the full range may not be covered by the loop + // at runtime and testing the range may apply deoptimization unnecessarily. + if (IsEarlyExitLoop(loop)) { + return false; + } + // Does the current basic block dominate all back edges? If not, + // don't apply dynamic bce to something that may not be executed. 
+ return loop->DominatesAllBackEdges(block); + } + return false; + } + + /** + * Returns true if the loop has early exits, which implies it may not cover + * the full range computed by range analysis based on induction variables. + */ + bool IsEarlyExitLoop(HLoopInformation* loop) { + const uint32_t loop_id = loop->GetHeader()->GetBlockId(); + // If loop has been analyzed earlier for early-exit, don't repeat the analysis. + auto it = early_exit_loop_.find(loop_id); + if (it != early_exit_loop_.end()) { + return it->second; + } + // First time early-exit analysis for this loop. Since analysis requires scanning + // the full loop-body, results of the analysis is stored for subsequent queries. + HBlocksInLoopReversePostOrderIterator it_loop(*loop); + for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) { + for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) { + if (!loop->Contains(*successor)) { + early_exit_loop_.Put(loop_id, true); + return true; + } + } + } + early_exit_loop_.Put(loop_id, false); + return false; + } + + /** + * Returns true if the array length is already loop invariant, or can be made so + * by handling the null check under the hood of the array length operation. + */ + bool CanHandleLength(HLoopInformation* loop, HInstruction* length, bool needs_taken_test) { + if (loop->IsDefinedOutOfTheLoop(length)) { + return true; + } else if (length->IsArrayLength() && length->GetBlock()->GetLoopInformation() == loop) { + if (CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) { + HoistToPreHeaderOrDeoptBlock(loop, length); + return true; + } + } + return false; + } + + /** + * Returns true if the null check is already loop invariant, or can be made so + * by generating a deoptimization test. 
+ */ + bool CanHandleNullCheck(HLoopInformation* loop, HInstruction* check, bool needs_taken_test) { + if (loop->IsDefinedOutOfTheLoop(check)) { + return true; + } else if (check->IsNullCheck() && check->GetBlock()->GetLoopInformation() == loop) { + HInstruction* array = check->InputAt(0); + if (loop->IsDefinedOutOfTheLoop(array)) { + // Generate: if (array == null) deoptimize; + TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test); + HBasicBlock* block = GetPreHeader(loop, check); + HInstruction* cond = + new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant()); + InsertDeoptInLoop(loop, block, cond, /* is_null_check= */ true); + ReplaceInstruction(check, array); + return true; + } + } + return false; + } + + /** + * Returns true if compiler can apply dynamic bce to loops that may be infinite + * (e.g. for (int i = 0; i <= U; i++) with U = MAX_INT), which would invalidate + * the range analysis evaluation code by "overshooting" the computed range. + * Since deoptimization would be a bad choice, and there is no other version + * of the loop to use, dynamic bce in such cases is only allowed if other tests + * ensure the loop is finite. + */ + bool CanHandleInfiniteLoop(HLoopInformation* loop, HInstruction* index, bool needs_infinite_test) { + if (needs_infinite_test) { + // If we already forced the loop to be finite, allow directly. + const uint32_t loop_id = loop->GetHeader()->GetBlockId(); + if (finite_loop_.find(loop_id) != finite_loop_.end()) { + return true; + } + // Otherwise, allow dynamic bce if the index (which is necessarily an induction at + // this point) is the direct loop index (viz. a[i]), since then the runtime tests + // ensure upper bound cannot cause an infinite loop. 
+ HInstruction* control = loop->GetHeader()->GetLastInstruction(); + if (control->IsIf()) { + HInstruction* if_expr = control->AsIf()->InputAt(0); + if (if_expr->IsCondition()) { + HCondition* condition = if_expr->AsCondition(); + if (index == condition->InputAt(0) || + index == condition->InputAt(1)) { + finite_loop_.insert(loop_id); + return true; + } + } + } + return false; + } + return true; + } + + /** + * Returns appropriate preheader for the loop, depending on whether the + * instruction appears in the loop header or proper loop-body. + */ + HBasicBlock* GetPreHeader(HLoopInformation* loop, HInstruction* instruction) { + // Use preheader unless there is an earlier generated deoptimization block since + // hoisted expressions may depend on and/or used by the deoptimization tests. + HBasicBlock* header = loop->GetHeader(); + const uint32_t loop_id = header->GetBlockId(); + auto it = taken_test_loop_.find(loop_id); + if (it != taken_test_loop_.end()) { + HBasicBlock* block = it->second; + // If always taken, keep it that way by returning the original preheader, + // which can be found by following the predecessor of the true-block twice. + if (instruction->GetBlock() == header) { + return block->GetSinglePredecessor()->GetSinglePredecessor(); + } + return block; + } + return loop->GetPreHeader(); + } + + /** Inserts a deoptimization test in a loop preheader. */ + void InsertDeoptInLoop(HLoopInformation* loop, + HBasicBlock* block, + HInstruction* condition, + bool is_null_check = false) { + HInstruction* suspend = loop->GetSuspendCheck(); + block->InsertInstructionBefore(condition, block->GetLastInstruction()); + DeoptimizationKind kind = + is_null_check ? 
DeoptimizationKind::kLoopNullBCE : DeoptimizationKind::kLoopBoundsBCE; + HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize( + GetGraph()->GetAllocator(), condition, kind, suspend->GetDexPc()); + block->InsertInstructionBefore(deoptimize, block->GetLastInstruction()); + if (suspend->HasEnvironment()) { + deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment( + suspend->GetEnvironment(), loop->GetHeader()); + } + } + + /** Inserts a deoptimization test right before a bounds check. */ + void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) { + HBasicBlock* block = bounds_check->GetBlock(); + block->InsertInstructionBefore(condition, bounds_check); + HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize( + GetGraph()->GetAllocator(), + condition, + DeoptimizationKind::kBlockBCE, + bounds_check->GetDexPc()); + block->InsertInstructionBefore(deoptimize, bounds_check); + deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment()); + } + + /** Hoists instruction out of the loop to preheader or deoptimization block. */ + void HoistToPreHeaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) { + HBasicBlock* block = GetPreHeader(loop, instruction); + DCHECK(!instruction->HasEnvironment()); + instruction->MoveBefore(block->GetLastInstruction()); + } + + /** + * Adds a new taken-test structure to a loop if needed and not already done. + * The taken-test protects range analysis evaluation code to avoid any + * deoptimization caused by incorrect trip-count evaluation in non-taken loops. 
+ * + * old_preheader + * | + * if_block <- taken-test protects deoptimization block + * / \ + * true_block false_block <- deoptimizations/invariants are placed in true_block + * \ / + * new_preheader <- may require phi nodes to preserve SSA structure + * | + * header + * + * For example, this loop: + * + * for (int i = lower; i < upper; i++) { + * array[i] = 0; + * } + * + * will be transformed to: + * + * if (lower < upper) { + * if (array == null) deoptimize; + * array_length = array.length; + * if (lower > upper) deoptimize; // unsigned + * if (upper >= array_length) deoptimize; // unsigned + * } else { + * array_length = 0; + * } + * for (int i = lower; i < upper; i++) { + * // Loop without null check and bounds check, and any array.length replaced with array_length. + * array[i] = 0; + * } + */ + void TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) { + // Not needed (can use preheader) or already done (can reuse)? + const uint32_t loop_id = loop->GetHeader()->GetBlockId(); + if (!needs_taken_test || taken_test_loop_.find(loop_id) != taken_test_loop_.end()) { + return; + } + + // Generate top test structure. + HBasicBlock* header = loop->GetHeader(); + GetGraph()->TransformLoopHeaderForBCE(header); + HBasicBlock* new_preheader = loop->GetPreHeader(); + HBasicBlock* if_block = new_preheader->GetDominator(); + HBasicBlock* true_block = if_block->GetSuccessors()[0]; // True successor. + HBasicBlock* false_block = if_block->GetSuccessors()[1]; // False successor. + + // Goto instructions. + true_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); + false_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); + new_preheader->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); + + // Insert the taken-test to see if the loop body is entered. If the + // loop isn't entered at all, it jumps around the deoptimization block. 
+ if_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); // placeholder + HInstruction* condition = induction_range_.GenerateTakenTest( + header->GetLastInstruction(), GetGraph(), if_block); + DCHECK(condition != nullptr); + if_block->RemoveInstruction(if_block->GetLastInstruction()); + if_block->AddInstruction(new (GetGraph()->GetAllocator()) HIf(condition)); + + taken_test_loop_.Put(loop_id, true_block); + } + + /** + * Inserts phi nodes that preserve SSA structure in generated top test structures. + * All uses of instructions in the deoptimization block that reach the loop need + * a phi node in the new loop preheader to fix the dominance relation. + * + * Example: + * if_block + * / \ + * x_0 = .. false_block + * \ / + * x_1 = phi(x_0, null) <- synthetic phi + * | + * new_preheader + */ + void InsertPhiNodes() { + // Scan all new deoptimization blocks. + for (const auto& entry : taken_test_loop_) { + HBasicBlock* true_block = entry.second; + HBasicBlock* new_preheader = true_block->GetSingleSuccessor(); + // Scan all instructions in a new deoptimization block. + for (HInstructionIterator it(true_block->GetInstructions()); !it.Done(); it.Advance()) { + HInstruction* instruction = it.Current(); + DataType::Type type = instruction->GetType(); + HPhi* phi = nullptr; + // Scan all uses of an instruction and replace each later use with a phi node. + const HUseList& uses = instruction->GetUses(); + for (auto it2 = uses.begin(), end2 = uses.end(); it2 != end2; /* ++it2 below */) { + HInstruction* user = it2->GetUser(); + size_t index = it2->GetIndex(); + // Increment `it2` now because `*it2` may disappear thanks to user->ReplaceInput(). + ++it2; + if (user->GetBlock() != true_block) { + if (phi == nullptr) { + phi = NewPhi(new_preheader, instruction, type); + } + user->ReplaceInput(phi, index); // Removes the use node from the list. 
+ induction_range_.Replace(user, instruction, phi); // update induction + } + } + // Scan all environment uses of an instruction and replace each later use with a phi node. + const HUseList& env_uses = instruction->GetEnvUses(); + for (auto it2 = env_uses.begin(), end2 = env_uses.end(); it2 != end2; /* ++it2 below */) { + HEnvironment* user = it2->GetUser(); + size_t index = it2->GetIndex(); + // Increment `it2` now because `*it2` may disappear thanks to user->RemoveAsUserOfInput(). + ++it2; + if (user->GetHolder()->GetBlock() != true_block) { + if (phi == nullptr) { + phi = NewPhi(new_preheader, instruction, type); + } + user->RemoveAsUserOfInput(index); + user->SetRawEnvAt(index, phi); + phi->AddEnvUseAt(user, index); + } + } + } + } + } + + /** + * Construct a phi(instruction, 0) in the new preheader to fix the dominance relation. + * These are synthetic phi nodes without a virtual register. + */ + HPhi* NewPhi(HBasicBlock* new_preheader, + HInstruction* instruction, + DataType::Type type) { + HGraph* graph = GetGraph(); + HInstruction* zero; + switch (type) { + case DataType::Type::kReference: zero = graph->GetNullConstant(); break; + case DataType::Type::kFloat32: zero = graph->GetFloatConstant(0); break; + case DataType::Type::kFloat64: zero = graph->GetDoubleConstant(0); break; + default: zero = graph->GetConstant(type, 0); break; + } + HPhi* phi = new (graph->GetAllocator()) + HPhi(graph->GetAllocator(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type)); + phi->SetRawInputAt(0, instruction); + phi->SetRawInputAt(1, zero); + if (type == DataType::Type::kReference) { + phi->SetReferenceTypeInfo(instruction->GetReferenceTypeInfo()); + } + new_preheader->AddPhi(phi); + return phi; + } + + /** Helper method to replace an instruction with another instruction. */ + void ReplaceInstruction(HInstruction* instruction, HInstruction* replacement) { + // Safe iteration. + if (instruction == next_) { + next_ = next_->GetNext(); + } + // Replace and remove. 
+ instruction->ReplaceWith(replacement); + instruction->GetBlock()->RemoveInstruction(instruction); + } + + // Use local allocator for allocating memory. + ScopedArenaAllocator allocator_; + + // A set of maps, one per basic block, from instruction to range. + ScopedArenaVector> maps_; + + // Map an HArrayLength instruction's id to the first HBoundsCheck instruction + // in a block that checks an index against that HArrayLength. + ScopedArenaSafeMap first_index_bounds_check_map_; + + // Early-exit loop bookkeeping. + ScopedArenaSafeMap early_exit_loop_; + + // Taken-test loop bookkeeping. + ScopedArenaSafeMap taken_test_loop_; + + // Finite loop bookkeeping. + ScopedArenaSet finite_loop_; + + // Flag that denotes whether dominator-based dynamic elimination has occurred. + bool has_dom_based_dynamic_bce_; + + // Initial number of blocks. + uint32_t initial_block_size_; + + // Side effects. + const SideEffectsAnalysis& side_effects_; + + // Range analysis based on induction variables. + InductionVarRange induction_range_; + + // Safe iteration. + HInstruction* next_; + + DISALLOW_COPY_AND_ASSIGN(BCEVisitor); +}; + +bool BoundsCheckElimination::Run() { + if (!graph_->HasBoundsChecks()) { + return false; + } + + // Reverse post order guarantees a node's dominators are visited first. + // We want to visit in the dominator-based order since if a value is known to + // be bounded by a range at one instruction, it must be true that all uses of + // that value dominated by that instruction fits in that range. Range of that + // value can be narrowed further down in the dominator tree. + BCEVisitor visitor(graph_, side_effects_, induction_analysis_); + for (size_t i = 0, size = graph_->GetReversePostOrder().size(); i != size; ++i) { + HBasicBlock* current = graph_->GetReversePostOrder()[i]; + if (visitor.IsAddedBlock(current)) { + // Skip added blocks. Their effects are already taken care of. 
+ continue; + } + visitor.VisitBasicBlock(current); + // Skip forward to the current block in case new basic blocks were inserted + // (which always appear earlier in reverse post order) to avoid visiting the + // same basic block twice. + size_t new_size = graph_->GetReversePostOrder().size(); + DCHECK_GE(new_size, size); + i += new_size - size; + DCHECK_EQ(current, graph_->GetReversePostOrder()[i]); + size = new_size; + } + + // Perform cleanup. + visitor.Finish(); + + return true; +} + +} // namespace art diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h new file mode 100644 index 0000000..ef08877 --- /dev/null +++ b/compiler/optimizing/bounds_check_elimination.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_OPTIMIZING_BOUNDS_CHECK_ELIMINATION_H_ +#define ART_COMPILER_OPTIMIZING_BOUNDS_CHECK_ELIMINATION_H_ + +#include "optimization.h" + +namespace art { + +class SideEffectsAnalysis; +class HInductionVarAnalysis; + +class BoundsCheckElimination : public HOptimization { + public: + BoundsCheckElimination(HGraph* graph, + const SideEffectsAnalysis& side_effects, + HInductionVarAnalysis* induction_analysis, + const char* name = kBoundsCheckEliminationPassName) + : HOptimization(graph, name), + side_effects_(side_effects), + induction_analysis_(induction_analysis) {} + + bool Run() override; + + static constexpr const char* kBoundsCheckEliminationPassName = "BCE"; + + private: + const SideEffectsAnalysis& side_effects_; + HInductionVarAnalysis* induction_analysis_; + + DISALLOW_COPY_AND_ASSIGN(BoundsCheckElimination); +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_BOUNDS_CHECK_ELIMINATION_H_ diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc new file mode 100644 index 0000000..5927d68 --- /dev/null +++ b/compiler/optimizing/bounds_check_elimination_test.cc @@ -0,0 +1,1109 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "bounds_check_elimination.h" + +#include "base/arena_allocator.h" +#include "builder.h" +#include "gvn.h" +#include "induction_var_analysis.h" +#include "instruction_simplifier.h" +#include "nodes.h" +#include "optimizing_unit_test.h" +#include "side_effects_analysis.h" + +#include "gtest/gtest.h" + +namespace art { + +/** + * Fixture class for the BoundsCheckElimination tests. + */ +class BoundsCheckEliminationTest : public OptimizingUnitTest { + public: + BoundsCheckEliminationTest() : graph_(CreateGraph()) { + graph_->SetHasBoundsChecks(true); + } + + ~BoundsCheckEliminationTest() { } + + void RunBCE() { + graph_->BuildDominatorTree(); + + InstructionSimplifier(graph_, /* codegen= */ nullptr).Run(); + + SideEffectsAnalysis side_effects(graph_); + side_effects.Run(); + + GVNOptimization(graph_, side_effects).Run(); + + HInductionVarAnalysis induction(graph_); + induction.Run(); + + BoundsCheckElimination(graph_, side_effects, &induction).Run(); + } + + HGraph* graph_; +}; + + +// if (i < 0) { array[i] = 1; // Can't eliminate. } +// else if (i >= array.length) { array[i] = 1; // Can't eliminate. } +// else { array[i] = 1; // Can eliminate. 
} +TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) { + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(entry); + graph_->SetEntryBlock(entry); + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array + HInstruction* parameter2 = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i + entry->AddInstruction(parameter1); + entry->AddInstruction(parameter2); + + HInstruction* constant_1 = graph_->GetIntConstant(1); + HInstruction* constant_0 = graph_->GetIntConstant(0); + + HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block1); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, constant_0); + HIf* if_inst = new (GetAllocator()) HIf(cmp); + block1->AddInstruction(cmp); + block1->AddInstruction(if_inst); + entry->AddSuccessor(block1); + + HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block2); + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check2 = new (GetAllocator()) + HBoundsCheck(parameter2, array_length, 0); + HArraySet* array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check2, constant_1, DataType::Type::kInt32, 0); + block2->AddInstruction(null_check); + block2->AddInstruction(array_length); + block2->AddInstruction(bounds_check2); + block2->AddInstruction(array_set); + + HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block3); + null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + cmp = new (GetAllocator()) HLessThan(parameter2, array_length); + if_inst = new (GetAllocator()) HIf(cmp); + 
block3->AddInstruction(null_check); + block3->AddInstruction(array_length); + block3->AddInstruction(cmp); + block3->AddInstruction(if_inst); + + HBasicBlock* block4 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block4); + null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check4 = new (GetAllocator()) + HBoundsCheck(parameter2, array_length, 0); + array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0); + block4->AddInstruction(null_check); + block4->AddInstruction(array_length); + block4->AddInstruction(bounds_check4); + block4->AddInstruction(array_set); + + HBasicBlock* block5 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block5); + null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check5 = new (GetAllocator()) + HBoundsCheck(parameter2, array_length, 0); + array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0); + block5->AddInstruction(null_check); + block5->AddInstruction(array_length); + block5->AddInstruction(bounds_check5); + block5->AddInstruction(array_set); + + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(exit); + block2->AddSuccessor(exit); + block4->AddSuccessor(exit); + block5->AddSuccessor(exit); + exit->AddInstruction(new (GetAllocator()) HExit()); + + block1->AddSuccessor(block3); // True successor + block1->AddSuccessor(block2); // False successor + + block3->AddSuccessor(block5); // True successor + block3->AddSuccessor(block4); // False successor + + RunBCE(); + + ASSERT_FALSE(IsRemoved(bounds_check2)); + ASSERT_FALSE(IsRemoved(bounds_check4)); + ASSERT_TRUE(IsRemoved(bounds_check5)); +} + +// if (i > 0) { +// // Positive number plus MAX_INT will overflow and be 
negative. +// int j = i + Integer.MAX_VALUE; +// if (j < array.length) array[j] = 1; // Can't eliminate. +// } +TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) { + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(entry); + graph_->SetEntryBlock(entry); + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array + HInstruction* parameter2 = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i + entry->AddInstruction(parameter1); + entry->AddInstruction(parameter2); + + HInstruction* constant_1 = graph_->GetIntConstant(1); + HInstruction* constant_0 = graph_->GetIntConstant(0); + HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX); + + HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block1); + HInstruction* cmp = new (GetAllocator()) HLessThanOrEqual(parameter2, constant_0); + HIf* if_inst = new (GetAllocator()) HIf(cmp); + block1->AddInstruction(cmp); + block1->AddInstruction(if_inst); + entry->AddSuccessor(block1); + + HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block2); + HInstruction* add = + new (GetAllocator()) HAdd(DataType::Type::kInt32, parameter2, constant_max_int); + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* cmp2 = new (GetAllocator()) HGreaterThanOrEqual(add, array_length); + if_inst = new (GetAllocator()) HIf(cmp2); + block2->AddInstruction(add); + block2->AddInstruction(null_check); + block2->AddInstruction(array_length); + block2->AddInstruction(cmp2); + block2->AddInstruction(if_inst); + + HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block3); + HBoundsCheck* bounds_check = new (GetAllocator()) + 
HBoundsCheck(add, array_length, 0); + HArraySet* array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check, constant_1, DataType::Type::kInt32, 0); + block3->AddInstruction(bounds_check); + block3->AddInstruction(array_set); + + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(exit); + exit->AddInstruction(new (GetAllocator()) HExit()); + block1->AddSuccessor(exit); // true successor + block1->AddSuccessor(block2); // false successor + block2->AddSuccessor(exit); // true successor + block2->AddSuccessor(block3); // false successor + block3->AddSuccessor(exit); + + RunBCE(); + + ASSERT_FALSE(IsRemoved(bounds_check)); +} + +// if (i < array.length) { +// int j = i - Integer.MAX_VALUE; +// j = j - Integer.MAX_VALUE; // j is (i+2) after subtracting MAX_INT twice +// if (j > 0) array[j] = 1; // Can't eliminate. +// } +TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) { + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(entry); + graph_->SetEntryBlock(entry); + HInstruction* parameter1 = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array + HInstruction* parameter2 = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i + entry->AddInstruction(parameter1); + entry->AddInstruction(parameter2); + + HInstruction* constant_1 = graph_->GetIntConstant(1); + HInstruction* constant_0 = graph_->GetIntConstant(0); + HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX); + + HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block1); + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, array_length); + HIf* if_inst = new (GetAllocator()) 
HIf(cmp); + block1->AddInstruction(null_check); + block1->AddInstruction(array_length); + block1->AddInstruction(cmp); + block1->AddInstruction(if_inst); + entry->AddSuccessor(block1); + + HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block2); + HInstruction* sub1 = + new (GetAllocator()) HSub(DataType::Type::kInt32, parameter2, constant_max_int); + HInstruction* sub2 = new (GetAllocator()) HSub(DataType::Type::kInt32, sub1, constant_max_int); + HInstruction* cmp2 = new (GetAllocator()) HLessThanOrEqual(sub2, constant_0); + if_inst = new (GetAllocator()) HIf(cmp2); + block2->AddInstruction(sub1); + block2->AddInstruction(sub2); + block2->AddInstruction(cmp2); + block2->AddInstruction(if_inst); + + HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block3); + HBoundsCheck* bounds_check = new (GetAllocator()) + HBoundsCheck(sub2, array_length, 0); + HArraySet* array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check, constant_1, DataType::Type::kInt32, 0); + block3->AddInstruction(bounds_check); + block3->AddInstruction(array_set); + + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(exit); + exit->AddInstruction(new (GetAllocator()) HExit()); + block1->AddSuccessor(exit); // true successor + block1->AddSuccessor(block2); // false successor + block2->AddSuccessor(exit); // true successor + block2->AddSuccessor(block3); // false successor + block3->AddSuccessor(exit); + + RunBCE(); + + ASSERT_FALSE(IsRemoved(bounds_check)); +} + +// array[6] = 1; // Can't eliminate. +// array[5] = 1; // Can eliminate. +// array[4] = 1; // Can eliminate. 
+TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) { + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(entry); + graph_->SetEntryBlock(entry); + HInstruction* parameter = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); + entry->AddInstruction(parameter); + + HInstruction* constant_5 = graph_->GetIntConstant(5); + HInstruction* constant_4 = graph_->GetIntConstant(4); + HInstruction* constant_6 = graph_->GetIntConstant(6); + HInstruction* constant_1 = graph_->GetIntConstant(1); + + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block); + entry->AddSuccessor(block); + + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check6 = new (GetAllocator()) + HBoundsCheck(constant_6, array_length, 0); + HInstruction* array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check6, constant_1, DataType::Type::kInt32, 0); + block->AddInstruction(null_check); + block->AddInstruction(array_length); + block->AddInstruction(bounds_check6); + block->AddInstruction(array_set); + + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check5 = new (GetAllocator()) + HBoundsCheck(constant_5, array_length, 0); + array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0); + block->AddInstruction(null_check); + block->AddInstruction(array_length); + block->AddInstruction(bounds_check5); + block->AddInstruction(array_set); + + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check4 = new (GetAllocator()) + HBoundsCheck(constant_4, array_length, 0); + array_set = new 
(GetAllocator()) HArraySet( + null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0); + block->AddInstruction(null_check); + block->AddInstruction(array_length); + block->AddInstruction(bounds_check4); + block->AddInstruction(array_set); + + block->AddInstruction(new (GetAllocator()) HGoto()); + + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(exit); + block->AddSuccessor(exit); + exit->AddInstruction(new (GetAllocator()) HExit()); + + RunBCE(); + + ASSERT_FALSE(IsRemoved(bounds_check6)); + ASSERT_TRUE(IsRemoved(bounds_check5)); + ASSERT_TRUE(IsRemoved(bounds_check4)); +} + +// for (int i=initial; iAddBlock(entry); + graph->SetEntryBlock(entry); + HInstruction* parameter = new (allocator) HParameterValue( + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); + entry->AddInstruction(parameter); + + HInstruction* constant_initial = graph->GetIntConstant(initial); + HInstruction* constant_increment = graph->GetIntConstant(increment); + HInstruction* constant_10 = graph->GetIntConstant(10); + + HBasicBlock* block = new (allocator) HBasicBlock(graph); + graph->AddBlock(block); + entry->AddSuccessor(block); + block->AddInstruction(new (allocator) HGoto()); + + HBasicBlock* loop_header = new (allocator) HBasicBlock(graph); + HBasicBlock* loop_body = new (allocator) HBasicBlock(graph); + HBasicBlock* exit = new (allocator) HBasicBlock(graph); + + graph->AddBlock(loop_header); + graph->AddBlock(loop_body); + graph->AddBlock(exit); + block->AddSuccessor(loop_header); + loop_header->AddSuccessor(exit); // true successor + loop_header->AddSuccessor(loop_body); // false successor + loop_body->AddSuccessor(loop_header); + + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); + HInstruction* null_check = new (allocator) HNullCheck(parameter, 0); + HInstruction* array_length = new (allocator) HArrayLength(null_check, 0); + HInstruction* cmp = nullptr; + if (cond == kCondGE) { + cmp = new 
(allocator) HGreaterThanOrEqual(phi, array_length); + } else { + DCHECK(cond == kCondGT); + cmp = new (allocator) HGreaterThan(phi, array_length); + } + HInstruction* if_inst = new (allocator) HIf(cmp); + loop_header->AddPhi(phi); + loop_header->AddInstruction(null_check); + loop_header->AddInstruction(array_length); + loop_header->AddInstruction(cmp); + loop_header->AddInstruction(if_inst); + phi->AddInput(constant_initial); + + null_check = new (allocator) HNullCheck(parameter, 0); + array_length = new (allocator) HArrayLength(null_check, 0); + HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0); + HInstruction* array_set = new (allocator) HArraySet( + null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); + + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_increment); + loop_body->AddInstruction(null_check); + loop_body->AddInstruction(array_length); + loop_body->AddInstruction(bounds_check); + loop_body->AddInstruction(array_set); + loop_body->AddInstruction(add); + loop_body->AddInstruction(new (allocator) HGoto()); + phi->AddInput(add); + + exit->AddInstruction(new (allocator) HExit()); + + return bounds_check; +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1a) { + // for (int i=0; i0; i+=increment) { array[i-1] = 10; } +static HInstruction* BuildSSAGraph2(HGraph *graph, + ArenaAllocator* allocator, + int initial, + int increment = -1, + IfCondition cond = kCondLE) { + HBasicBlock* entry = new (allocator) HBasicBlock(graph); + graph->AddBlock(entry); + graph->SetEntryBlock(entry); + HInstruction* parameter = new (allocator) HParameterValue( + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); + entry->AddInstruction(parameter); + + HInstruction* constant_initial = graph->GetIntConstant(initial); + HInstruction* constant_increment = graph->GetIntConstant(increment); + HInstruction* constant_minus_1 = graph->GetIntConstant(-1); + HInstruction* 
constant_10 = graph->GetIntConstant(10); + + HBasicBlock* block = new (allocator) HBasicBlock(graph); + graph->AddBlock(block); + entry->AddSuccessor(block); + HInstruction* null_check = new (allocator) HNullCheck(parameter, 0); + HInstruction* array_length = new (allocator) HArrayLength(null_check, 0); + block->AddInstruction(null_check); + block->AddInstruction(array_length); + block->AddInstruction(new (allocator) HGoto()); + + HBasicBlock* loop_header = new (allocator) HBasicBlock(graph); + HBasicBlock* loop_body = new (allocator) HBasicBlock(graph); + HBasicBlock* exit = new (allocator) HBasicBlock(graph); + + graph->AddBlock(loop_header); + graph->AddBlock(loop_body); + graph->AddBlock(exit); + block->AddSuccessor(loop_header); + loop_header->AddSuccessor(exit); // true successor + loop_header->AddSuccessor(loop_body); // false successor + loop_body->AddSuccessor(loop_header); + + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); + HInstruction* cmp = nullptr; + if (cond == kCondLE) { + cmp = new (allocator) HLessThanOrEqual(phi, constant_initial); + } else { + DCHECK(cond == kCondLT); + cmp = new (allocator) HLessThan(phi, constant_initial); + } + HInstruction* if_inst = new (allocator) HIf(cmp); + loop_header->AddPhi(phi); + loop_header->AddInstruction(cmp); + loop_header->AddInstruction(if_inst); + phi->AddInput(array_length); + + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_minus_1); + null_check = new (allocator) HNullCheck(parameter, 0); + array_length = new (allocator) HArrayLength(null_check, 0); + HInstruction* bounds_check = new (allocator) HBoundsCheck(add, array_length, 0); + HInstruction* array_set = new (allocator) HArraySet( + null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); + HInstruction* add_phi = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_increment); + loop_body->AddInstruction(add); + loop_body->AddInstruction(null_check); + 
loop_body->AddInstruction(array_length); + loop_body->AddInstruction(bounds_check); + loop_body->AddInstruction(array_set); + loop_body->AddInstruction(add_phi); + loop_body->AddInstruction(new (allocator) HGoto()); + phi->AddInput(add); + + exit->AddInstruction(new (allocator) HExit()); + + return bounds_check; +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2a) { + // for (int i=array.length; i>0; i--) { array[i-1] = 10; // Can eliminate with gvn. } + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0); + RunBCE(); + ASSERT_TRUE(IsRemoved(bounds_check)); +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2b) { + // for (int i=array.length; i>1; i--) { array[i-1] = 10; // Can eliminate. } + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 1); + RunBCE(); + ASSERT_TRUE(IsRemoved(bounds_check)); +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2c) { + // for (int i=array.length; i>-1; i--) { array[i-1] = 10; // Can't eliminate. } + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), -1); + RunBCE(); + ASSERT_FALSE(IsRemoved(bounds_check)); +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2d) { + // for (int i=array.length; i>=0; i--) { array[i-1] = 10; // Can't eliminate. } + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -1, kCondLT); + RunBCE(); + ASSERT_FALSE(IsRemoved(bounds_check)); +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2e) { + // for (int i=array.length; i>0; i-=2) { array[i-1] = 10; // Can eliminate. 
} + HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -2); + RunBCE(); + ASSERT_TRUE(IsRemoved(bounds_check)); +} + +// int[] array = new int[10]; +// for (int i=0; i<10; i+=increment) { array[i] = 10; } +static HInstruction* BuildSSAGraph3(HGraph* graph, + ArenaAllocator* allocator, + int initial, + int increment, + IfCondition cond) { + HBasicBlock* entry = new (allocator) HBasicBlock(graph); + graph->AddBlock(entry); + graph->SetEntryBlock(entry); + + HInstruction* constant_10 = graph->GetIntConstant(10); + HInstruction* constant_initial = graph->GetIntConstant(initial); + HInstruction* constant_increment = graph->GetIntConstant(increment); + + HBasicBlock* block = new (allocator) HBasicBlock(graph); + graph->AddBlock(block); + entry->AddSuccessor(block); + // We pass a bogus constant for the class to avoid mocking one. + HInstruction* new_array = new (allocator) HNewArray( + /* cls= */ constant_10, + /* length= */ constant_10, + /* dex_pc= */ 0, + /* component_size_shift= */ 0); + block->AddInstruction(new_array); + block->AddInstruction(new (allocator) HGoto()); + + HBasicBlock* loop_header = new (allocator) HBasicBlock(graph); + HBasicBlock* loop_body = new (allocator) HBasicBlock(graph); + HBasicBlock* exit = new (allocator) HBasicBlock(graph); + + graph->AddBlock(loop_header); + graph->AddBlock(loop_body); + graph->AddBlock(exit); + block->AddSuccessor(loop_header); + loop_header->AddSuccessor(exit); // true successor + loop_header->AddSuccessor(loop_body); // false successor + loop_body->AddSuccessor(loop_header); + + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); + HInstruction* cmp = nullptr; + if (cond == kCondGE) { + cmp = new (allocator) HGreaterThanOrEqual(phi, constant_10); + } else { + DCHECK(cond == kCondGT); + cmp = new (allocator) HGreaterThan(phi, constant_10); + } + HInstruction* if_inst = new (allocator) HIf(cmp); + loop_header->AddPhi(phi); + loop_header->AddInstruction(cmp); + 
loop_header->AddInstruction(if_inst); + phi->AddInput(constant_initial); + + HNullCheck* null_check = new (allocator) HNullCheck(new_array, 0); + HArrayLength* array_length = new (allocator) HArrayLength(null_check, 0); + HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0); + HInstruction* array_set = new (allocator) HArraySet( + null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_increment); + loop_body->AddInstruction(null_check); + loop_body->AddInstruction(array_length); + loop_body->AddInstruction(bounds_check); + loop_body->AddInstruction(array_set); + loop_body->AddInstruction(add); + loop_body->AddInstruction(new (allocator) HGoto()); + phi->AddInput(add); + + exit->AddInstruction(new (allocator) HExit()); + + return bounds_check; +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) { + // int[] array = new int[10]; + // for (int i=0; i<10; i++) { array[i] = 10; // Can eliminate. } + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGE); + RunBCE(); + ASSERT_TRUE(IsRemoved(bounds_check)); +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) { + // int[] array = new int[10]; + // for (int i=1; i<10; i++) { array[i] = 10; // Can eliminate. } + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 1, kCondGE); + RunBCE(); + ASSERT_TRUE(IsRemoved(bounds_check)); +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) { + // int[] array = new int[10]; + // for (int i=0; i<=10; i++) { array[i] = 10; // Can't eliminate. } + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGT); + RunBCE(); + ASSERT_FALSE(IsRemoved(bounds_check)); +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3d) { + // int[] array = new int[10]; + // for (int i=1; i<10; i+=8) { array[i] = 10; // Can eliminate. 
} + HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 8, kCondGE); + RunBCE(); + ASSERT_TRUE(IsRemoved(bounds_check)); +} + +// for (int i=initial; iAddBlock(entry); + graph->SetEntryBlock(entry); + HInstruction* parameter = new (allocator) HParameterValue( + graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); + entry->AddInstruction(parameter); + + HInstruction* constant_initial = graph->GetIntConstant(initial); + HInstruction* constant_1 = graph->GetIntConstant(1); + HInstruction* constant_10 = graph->GetIntConstant(10); + HInstruction* constant_minus_1 = graph->GetIntConstant(-1); + + HBasicBlock* block = new (allocator) HBasicBlock(graph); + graph->AddBlock(block); + entry->AddSuccessor(block); + block->AddInstruction(new (allocator) HGoto()); + + HBasicBlock* loop_header = new (allocator) HBasicBlock(graph); + HBasicBlock* loop_body = new (allocator) HBasicBlock(graph); + HBasicBlock* exit = new (allocator) HBasicBlock(graph); + + graph->AddBlock(loop_header); + graph->AddBlock(loop_body); + graph->AddBlock(exit); + block->AddSuccessor(loop_header); + loop_header->AddSuccessor(exit); // true successor + loop_header->AddSuccessor(loop_body); // false successor + loop_body->AddSuccessor(loop_header); + + HPhi* phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32); + HInstruction* null_check = new (allocator) HNullCheck(parameter, 0); + HInstruction* array_length = new (allocator) HArrayLength(null_check, 0); + HInstruction* cmp = nullptr; + if (cond == kCondGE) { + cmp = new (allocator) HGreaterThanOrEqual(phi, array_length); + } else if (cond == kCondGT) { + cmp = new (allocator) HGreaterThan(phi, array_length); + } + HInstruction* if_inst = new (allocator) HIf(cmp); + loop_header->AddPhi(phi); + loop_header->AddInstruction(null_check); + loop_header->AddInstruction(array_length); + loop_header->AddInstruction(cmp); + loop_header->AddInstruction(if_inst); + phi->AddInput(constant_initial); + + null_check = 
new (allocator) HNullCheck(parameter, 0); + array_length = new (allocator) HArrayLength(null_check, 0); + HInstruction* sub = new (allocator) HSub(DataType::Type::kInt32, array_length, phi); + HInstruction* add_minus_1 = new (allocator) + HAdd(DataType::Type::kInt32, sub, constant_minus_1); + HInstruction* bounds_check = new (allocator) HBoundsCheck(add_minus_1, array_length, 0); + HInstruction* array_set = new (allocator) HArraySet( + null_check, bounds_check, constant_10, DataType::Type::kInt32, 0); + HInstruction* add = new (allocator) HAdd(DataType::Type::kInt32, phi, constant_1); + loop_body->AddInstruction(null_check); + loop_body->AddInstruction(array_length); + loop_body->AddInstruction(sub); + loop_body->AddInstruction(add_minus_1); + loop_body->AddInstruction(bounds_check); + loop_body->AddInstruction(array_set); + loop_body->AddInstruction(add); + loop_body->AddInstruction(new (allocator) HGoto()); + phi->AddInput(add); + + exit->AddInstruction(new (allocator) HExit()); + + return bounds_check; +} + +TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4a) { + // for (int i=0; i array[j+1]) { +// int temp = array[j+1]; +// array[j+1] = array[j]; +// array[j] = temp; +// } +// } +// } +TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) { + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(entry); + graph_->SetEntryBlock(entry); + HInstruction* parameter = new (GetAllocator()) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); + entry->AddInstruction(parameter); + + HInstruction* constant_0 = graph_->GetIntConstant(0); + HInstruction* constant_minus_1 = graph_->GetIntConstant(-1); + HInstruction* constant_1 = graph_->GetIntConstant(1); + + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block); + entry->AddSuccessor(block); + block->AddInstruction(new (GetAllocator()) HGoto()); + + HBasicBlock* exit = new (GetAllocator()) 
HBasicBlock(graph_); + graph_->AddBlock(exit); + exit->AddInstruction(new (GetAllocator()) HExit()); + + HBasicBlock* outer_header = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(outer_header); + HPhi* phi_i = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HAdd* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_length, constant_minus_1); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_i, add); + HIf* if_inst = new (GetAllocator()) HIf(cmp); + outer_header->AddPhi(phi_i); + outer_header->AddInstruction(null_check); + outer_header->AddInstruction(array_length); + outer_header->AddInstruction(add); + outer_header->AddInstruction(cmp); + outer_header->AddInstruction(if_inst); + phi_i->AddInput(constant_0); + + HBasicBlock* inner_header = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(inner_header); + HPhi* phi_j = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HSub* sub = new (GetAllocator()) HSub(DataType::Type::kInt32, array_length, phi_i); + add = new (GetAllocator()) HAdd(DataType::Type::kInt32, sub, constant_minus_1); + cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_j, add); + if_inst = new (GetAllocator()) HIf(cmp); + inner_header->AddPhi(phi_j); + inner_header->AddInstruction(null_check); + inner_header->AddInstruction(array_length); + inner_header->AddInstruction(sub); + inner_header->AddInstruction(add); + inner_header->AddInstruction(cmp); + inner_header->AddInstruction(if_inst); + phi_j->AddInput(constant_0); + + HBasicBlock* inner_body_compare = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(inner_body_compare); + null_check = new (GetAllocator()) 
HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check1 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0); + HArrayGet* array_get_j = new (GetAllocator()) + HArrayGet(null_check, bounds_check1, DataType::Type::kInt32, 0); + inner_body_compare->AddInstruction(null_check); + inner_body_compare->AddInstruction(array_length); + inner_body_compare->AddInstruction(bounds_check1); + inner_body_compare->AddInstruction(array_get_j); + HInstruction* j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1); + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HBoundsCheck* bounds_check2 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0); + HArrayGet* array_get_j_plus_1 = new (GetAllocator()) + HArrayGet(null_check, bounds_check2, DataType::Type::kInt32, 0); + cmp = new (GetAllocator()) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1); + if_inst = new (GetAllocator()) HIf(cmp); + inner_body_compare->AddInstruction(j_plus_1); + inner_body_compare->AddInstruction(null_check); + inner_body_compare->AddInstruction(array_length); + inner_body_compare->AddInstruction(bounds_check2); + inner_body_compare->AddInstruction(array_get_j_plus_1); + inner_body_compare->AddInstruction(cmp); + inner_body_compare->AddInstruction(if_inst); + + HBasicBlock* inner_body_swap = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(inner_body_swap); + j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1); + // temp = array[j+1] + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check3 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0); + array_get_j_plus_1 = new (GetAllocator()) + HArrayGet(null_check, bounds_check3, DataType::Type::kInt32, 0); + 
inner_body_swap->AddInstruction(j_plus_1); + inner_body_swap->AddInstruction(null_check); + inner_body_swap->AddInstruction(array_length); + inner_body_swap->AddInstruction(bounds_check3); + inner_body_swap->AddInstruction(array_get_j_plus_1); + // array[j+1] = array[j] + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check4 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0); + array_get_j = new (GetAllocator()) + HArrayGet(null_check, bounds_check4, DataType::Type::kInt32, 0); + inner_body_swap->AddInstruction(null_check); + inner_body_swap->AddInstruction(array_length); + inner_body_swap->AddInstruction(bounds_check4); + inner_body_swap->AddInstruction(array_get_j); + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check5 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0); + HArraySet* array_set_j_plus_1 = new (GetAllocator()) + HArraySet(null_check, bounds_check5, array_get_j, DataType::Type::kInt32, 0); + inner_body_swap->AddInstruction(null_check); + inner_body_swap->AddInstruction(array_length); + inner_body_swap->AddInstruction(bounds_check5); + inner_body_swap->AddInstruction(array_set_j_plus_1); + // array[j] = temp + null_check = new (GetAllocator()) HNullCheck(parameter, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HInstruction* bounds_check6 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0); + HArraySet* array_set_j = new (GetAllocator()) + HArraySet(null_check, bounds_check6, array_get_j_plus_1, DataType::Type::kInt32, 0); + inner_body_swap->AddInstruction(null_check); + inner_body_swap->AddInstruction(array_length); + inner_body_swap->AddInstruction(bounds_check6); + inner_body_swap->AddInstruction(array_set_j); + inner_body_swap->AddInstruction(new (GetAllocator()) HGoto()); + + HBasicBlock* 
inner_body_add = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(inner_body_add); + add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1); + inner_body_add->AddInstruction(add); + inner_body_add->AddInstruction(new (GetAllocator()) HGoto()); + phi_j->AddInput(add); + + HBasicBlock* outer_body_add = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(outer_body_add); + add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_i, constant_1); + outer_body_add->AddInstruction(add); + outer_body_add->AddInstruction(new (GetAllocator()) HGoto()); + phi_i->AddInput(add); + + block->AddSuccessor(outer_header); + outer_header->AddSuccessor(exit); + outer_header->AddSuccessor(inner_header); + inner_header->AddSuccessor(outer_body_add); + inner_header->AddSuccessor(inner_body_compare); + inner_body_compare->AddSuccessor(inner_body_add); + inner_body_compare->AddSuccessor(inner_body_swap); + inner_body_swap->AddSuccessor(inner_body_add); + inner_body_add->AddSuccessor(inner_header); + outer_body_add->AddSuccessor(outer_header); + + RunBCE(); // gvn removes same bounds check already + + ASSERT_TRUE(IsRemoved(bounds_check1)); + ASSERT_TRUE(IsRemoved(bounds_check2)); + ASSERT_TRUE(IsRemoved(bounds_check3)); + ASSERT_TRUE(IsRemoved(bounds_check4)); + ASSERT_TRUE(IsRemoved(bounds_check5)); + ASSERT_TRUE(IsRemoved(bounds_check6)); +} + +// int[] array = new int[10]; +// for (int i=0; i<200; i++) { +// array[i%10] = 10; // Can eliminate +// array[i%1] = 10; // Can eliminate +// array[i%200] = 10; // Cannot eliminate +// array[i%-10] = 10; // Can eliminate +// array[i%array.length] = 10; // Can eliminate +// array[param_i%10] = 10; // Can't eliminate, when param_i < 0 +// } +TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) { + HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(entry); + graph_->SetEntryBlock(entry); + HInstruction* param_i = new (GetAllocator()) + 
HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); + entry->AddInstruction(param_i); + + HInstruction* constant_0 = graph_->GetIntConstant(0); + HInstruction* constant_1 = graph_->GetIntConstant(1); + HInstruction* constant_10 = graph_->GetIntConstant(10); + HInstruction* constant_200 = graph_->GetIntConstant(200); + HInstruction* constant_minus_10 = graph_->GetIntConstant(-10); + + HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_); + graph_->AddBlock(block); + entry->AddSuccessor(block); + // We pass a bogus constant for the class to avoid mocking one. + HInstruction* new_array = new (GetAllocator()) HNewArray( + /* cls= */ constant_10, + /* length= */ constant_10, + /* dex_pc= */ 0, + /* component_size_shift= */ 0); + block->AddInstruction(new_array); + block->AddInstruction(new (GetAllocator()) HGoto()); + + HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_); + HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_); + + graph_->AddBlock(loop_header); + graph_->AddBlock(loop_body); + graph_->AddBlock(exit); + block->AddSuccessor(loop_header); + loop_header->AddSuccessor(exit); // true successor + loop_header->AddSuccessor(loop_body); // false successor + loop_body->AddSuccessor(loop_header); + + HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32); + HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi, constant_200); + HInstruction* if_inst = new (GetAllocator()) HIf(cmp); + loop_header->AddPhi(phi); + loop_header->AddInstruction(cmp); + loop_header->AddInstruction(if_inst); + phi->AddInput(constant_0); + + ////////////////////////////////////////////////////////////////////////////////// + // LOOP BODY: + // array[i % 10] = 10; + HRem* i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_10, 0); + HBoundsCheck* bounds_check_i_mod_10 = new (GetAllocator()) 
HBoundsCheck(i_mod_10, constant_10, 0); + HInstruction* array_set = new (GetAllocator()) HArraySet( + new_array, bounds_check_i_mod_10, constant_10, DataType::Type::kInt32, 0); + loop_body->AddInstruction(i_mod_10); + loop_body->AddInstruction(bounds_check_i_mod_10); + loop_body->AddInstruction(array_set); + + // array[i % 1] = 10; + HRem* i_mod_1 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0); + HBoundsCheck* bounds_check_i_mod_1 = new (GetAllocator()) HBoundsCheck(i_mod_1, constant_10, 0); + array_set = new (GetAllocator()) HArraySet( + new_array, bounds_check_i_mod_1, constant_10, DataType::Type::kInt32, 0); + loop_body->AddInstruction(i_mod_1); + loop_body->AddInstruction(bounds_check_i_mod_1); + loop_body->AddInstruction(array_set); + + // array[i % 200] = 10; + HRem* i_mod_200 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0); + HBoundsCheck* bounds_check_i_mod_200 = new (GetAllocator()) HBoundsCheck( + i_mod_200, constant_10, 0); + array_set = new (GetAllocator()) HArraySet( + new_array, bounds_check_i_mod_200, constant_10, DataType::Type::kInt32, 0); + loop_body->AddInstruction(i_mod_200); + loop_body->AddInstruction(bounds_check_i_mod_200); + loop_body->AddInstruction(array_set); + + // array[i % -10] = 10; + HRem* i_mod_minus_10 = new (GetAllocator()) HRem( + DataType::Type::kInt32, phi, constant_minus_10, 0); + HBoundsCheck* bounds_check_i_mod_minus_10 = new (GetAllocator()) HBoundsCheck( + i_mod_minus_10, constant_10, 0); + array_set = new (GetAllocator()) HArraySet( + new_array, bounds_check_i_mod_minus_10, constant_10, DataType::Type::kInt32, 0); + loop_body->AddInstruction(i_mod_minus_10); + loop_body->AddInstruction(bounds_check_i_mod_minus_10); + loop_body->AddInstruction(array_set); + + // array[i%array.length] = 10; + HNullCheck* null_check = new (GetAllocator()) HNullCheck(new_array, 0); + HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HRem* i_mod_array_length = new 
(GetAllocator()) HRem( + DataType::Type::kInt32, phi, array_length, 0); + HBoundsCheck* bounds_check_i_mod_array_len = new (GetAllocator()) HBoundsCheck( + i_mod_array_length, array_length, 0); + array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check_i_mod_array_len, constant_10, DataType::Type::kInt32, 0); + loop_body->AddInstruction(null_check); + loop_body->AddInstruction(array_length); + loop_body->AddInstruction(i_mod_array_length); + loop_body->AddInstruction(bounds_check_i_mod_array_len); + loop_body->AddInstruction(array_set); + + // array[param_i % 10] = 10; + HRem* param_i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, param_i, constant_10, 0); + HBoundsCheck* bounds_check_param_i_mod_10 = new (GetAllocator()) HBoundsCheck( + param_i_mod_10, constant_10, 0); + array_set = new (GetAllocator()) HArraySet( + new_array, bounds_check_param_i_mod_10, constant_10, DataType::Type::kInt32, 0); + loop_body->AddInstruction(param_i_mod_10); + loop_body->AddInstruction(bounds_check_param_i_mod_10); + loop_body->AddInstruction(array_set); + + // array[param_i%array.length] = 10; + null_check = new (GetAllocator()) HNullCheck(new_array, 0); + array_length = new (GetAllocator()) HArrayLength(null_check, 0); + HRem* param_i_mod_array_length = new (GetAllocator()) HRem( + DataType::Type::kInt32, param_i, array_length, 0); + HBoundsCheck* bounds_check_param_i_mod_array_len = new (GetAllocator()) HBoundsCheck( + param_i_mod_array_length, array_length, 0); + array_set = new (GetAllocator()) HArraySet( + null_check, bounds_check_param_i_mod_array_len, constant_10, DataType::Type::kInt32, 0); + loop_body->AddInstruction(null_check); + loop_body->AddInstruction(array_length); + loop_body->AddInstruction(param_i_mod_array_length); + loop_body->AddInstruction(bounds_check_param_i_mod_array_len); + loop_body->AddInstruction(array_set); + + // i++; + HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, constant_1); + 
loop_body->AddInstruction(add); + loop_body->AddInstruction(new (GetAllocator()) HGoto()); + phi->AddInput(add); + ////////////////////////////////////////////////////////////////////////////////// + + exit->AddInstruction(new (GetAllocator()) HExit()); + + RunBCE(); + + ASSERT_TRUE(IsRemoved(bounds_check_i_mod_10)); + ASSERT_TRUE(IsRemoved(bounds_check_i_mod_1)); + ASSERT_TRUE(IsRemoved(bounds_check_i_mod_200)); + ASSERT_TRUE(IsRemoved(bounds_check_i_mod_minus_10)); + ASSERT_TRUE(IsRemoved(bounds_check_i_mod_array_len)); + ASSERT_FALSE(IsRemoved(bounds_check_param_i_mod_10)); +} + +} // namespace art diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc new file mode 100644 index 0000000..64aa1b9 --- /dev/null +++ b/compiler/optimizing/builder.cc @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "builder.h" + +#include "art_field-inl.h" +#include "base/arena_bit_vector.h" +#include "base/bit_vector-inl.h" +#include "base/logging.h" +#include "block_builder.h" +#include "code_generator.h" +#include "data_type-inl.h" +#include "dex/verified_method.h" +#include "driver/compiler_options.h" +#include "driver/dex_compilation_unit.h" +#include "instruction_builder.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache.h" +#include "nodes.h" +#include "optimizing_compiler_stats.h" +#include "ssa_builder.h" +#include "thread.h" +#include "utils/dex_cache_arrays_layout-inl.h" + +namespace art { + +HGraphBuilder::HGraphBuilder(HGraph* graph, + const CodeItemDebugInfoAccessor& accessor, + const DexCompilationUnit* dex_compilation_unit, + const DexCompilationUnit* outer_compilation_unit, + CodeGenerator* code_generator, + OptimizingCompilerStats* compiler_stats, + ArrayRef interpreter_metadata, + VariableSizedHandleScope* handles) + : graph_(graph), + dex_file_(&graph->GetDexFile()), + code_item_accessor_(accessor), + dex_compilation_unit_(dex_compilation_unit), + outer_compilation_unit_(outer_compilation_unit), + code_generator_(code_generator), + compilation_stats_(compiler_stats), + interpreter_metadata_(interpreter_metadata), + handles_(handles), + return_type_(DataType::FromShorty(dex_compilation_unit_->GetShorty()[0])) {} + +HGraphBuilder::HGraphBuilder(HGraph* graph, + const DexCompilationUnit* dex_compilation_unit, + const CodeItemDebugInfoAccessor& accessor, + VariableSizedHandleScope* handles, + DataType::Type return_type) + : graph_(graph), + dex_file_(&graph->GetDexFile()), + code_item_accessor_(accessor), + dex_compilation_unit_(dex_compilation_unit), + outer_compilation_unit_(nullptr), + code_generator_(nullptr), + compilation_stats_(nullptr), + handles_(handles), + return_type_(return_type) {} + +bool HGraphBuilder::SkipCompilation(size_t number_of_branches) { + if (code_generator_ == nullptr) { + // Note that the codegen is 
null when unit testing. + return false; + } + + const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions(); + CompilerFilter::Filter compiler_filter = compiler_options.GetCompilerFilter(); + if (compiler_filter == CompilerFilter::kEverything) { + return false; + } + + const uint32_t code_units = code_item_accessor_.InsnsSizeInCodeUnits(); + if (compiler_options.IsHugeMethod(code_units)) { + VLOG(compiler) << "Skip compilation of huge method " + << dex_file_->PrettyMethod(dex_compilation_unit_->GetDexMethodIndex()) + << ": " << code_units << " code units"; + MaybeRecordStat(compilation_stats_, MethodCompilationStat::kNotCompiledHugeMethod); + return true; + } + + // If it's large and contains no branches, it's likely to be machine generated initialization. + if (compiler_options.IsLargeMethod(code_units) && (number_of_branches == 0)) { + VLOG(compiler) << "Skip compilation of large method with no branch " + << dex_file_->PrettyMethod(dex_compilation_unit_->GetDexMethodIndex()) + << ": " << code_units << " code units"; + MaybeRecordStat(compilation_stats_, MethodCompilationStat::kNotCompiledLargeMethodNoBranches); + return true; + } + + return false; +} + +GraphAnalysisResult HGraphBuilder::BuildGraph() { + DCHECK(code_item_accessor_.HasCodeItem()); + DCHECK(graph_->GetBlocks().empty()); + + graph_->SetNumberOfVRegs(code_item_accessor_.RegistersSize()); + graph_->SetNumberOfInVRegs(code_item_accessor_.InsSize()); + graph_->SetMaximumNumberOfOutVRegs(code_item_accessor_.OutsSize()); + graph_->SetHasTryCatch(code_item_accessor_.TriesSize() != 0); + + // Use ScopedArenaAllocator for all local allocations. 
+ ScopedArenaAllocator local_allocator(graph_->GetArenaStack()); + HBasicBlockBuilder block_builder(graph_, dex_file_, code_item_accessor_, &local_allocator); + SsaBuilder ssa_builder(graph_, + dex_compilation_unit_->GetClassLoader(), + dex_compilation_unit_->GetDexCache(), + handles_, + &local_allocator); + HInstructionBuilder instruction_builder(graph_, + &block_builder, + &ssa_builder, + dex_file_, + code_item_accessor_, + return_type_, + dex_compilation_unit_, + outer_compilation_unit_, + code_generator_, + interpreter_metadata_, + compilation_stats_, + handles_, + &local_allocator); + + // 1) Create basic blocks and link them together. Basic blocks are left + // unpopulated with the exception of synthetic blocks, e.g. HTryBoundaries. + if (!block_builder.Build()) { + return kAnalysisInvalidBytecode; + } + + // 2) Decide whether to skip this method based on its code size and number + // of branches. + if (SkipCompilation(block_builder.GetNumberOfBranches())) { + return kAnalysisSkipped; + } + + // 3) Build the dominator tree and fill in loop and try/catch metadata. + GraphAnalysisResult result = graph_->BuildDominatorTree(); + if (result != kAnalysisSuccess) { + return result; + } + + // 4) Populate basic blocks with instructions. + if (!instruction_builder.Build()) { + return kAnalysisInvalidBytecode; + } + + // 5) Type the graph and eliminate dead/redundant phis. + return ssa_builder.BuildSsa(); +} + +void HGraphBuilder::BuildIntrinsicGraph(ArtMethod* method) { + DCHECK(!code_item_accessor_.HasCodeItem()); + DCHECK(graph_->GetBlocks().empty()); + + // Determine the number of arguments and associated vregs. 
+ uint32_t method_idx = dex_compilation_unit_->GetDexMethodIndex(); + const char* shorty = dex_file_->GetMethodShorty(dex_file_->GetMethodId(method_idx)); + size_t num_args = strlen(shorty + 1); + size_t num_wide_args = std::count(shorty + 1, shorty + 1 + num_args, 'J') + + std::count(shorty + 1, shorty + 1 + num_args, 'D'); + size_t num_arg_vregs = num_args + num_wide_args + (dex_compilation_unit_->IsStatic() ? 0u : 1u); + + // For simplicity, reserve 2 vregs (the maximum) for return value regardless of the return type. + size_t return_vregs = 2u; + graph_->SetNumberOfVRegs(return_vregs + num_arg_vregs); + graph_->SetNumberOfInVRegs(num_arg_vregs); + graph_->SetMaximumNumberOfOutVRegs(num_arg_vregs); + graph_->SetHasTryCatch(false); + + // Use ScopedArenaAllocator for all local allocations. + ScopedArenaAllocator local_allocator(graph_->GetArenaStack()); + HBasicBlockBuilder block_builder(graph_, + dex_file_, + CodeItemDebugInfoAccessor(), + &local_allocator); + SsaBuilder ssa_builder(graph_, + dex_compilation_unit_->GetClassLoader(), + dex_compilation_unit_->GetDexCache(), + handles_, + &local_allocator); + HInstructionBuilder instruction_builder(graph_, + &block_builder, + &ssa_builder, + dex_file_, + CodeItemDebugInfoAccessor(), + return_type_, + dex_compilation_unit_, + outer_compilation_unit_, + code_generator_, + interpreter_metadata_, + compilation_stats_, + handles_, + &local_allocator); + + // 1) Create basic blocks for the intrinsic and link them together. + block_builder.BuildIntrinsic(); + + // 2) Build the trivial dominator tree. + GraphAnalysisResult bdt_result = graph_->BuildDominatorTree(); + DCHECK_EQ(bdt_result, kAnalysisSuccess); + + // 3) Populate basic blocks with instructions for the intrinsic. + instruction_builder.BuildIntrinsic(method); + + // 4) Type the graph (no dead/redundant phis to eliminate). 
+ GraphAnalysisResult build_ssa_result = ssa_builder.BuildSsa(); + DCHECK_EQ(build_ssa_result, kAnalysisSuccess); +} + +} // namespace art diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h new file mode 100644 index 0000000..6152740 --- /dev/null +++ b/compiler/optimizing/builder.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_BUILDER_H_ +#define ART_COMPILER_OPTIMIZING_BUILDER_H_ + +#include "base/arena_object.h" +#include "base/array_ref.h" +#include "dex/code_item_accessors.h" +#include "dex/dex_file-inl.h" +#include "dex/dex_file.h" +#include "nodes.h" + +namespace art { + +class ArtMethod; +class CodeGenerator; +class DexCompilationUnit; +class OptimizingCompilerStats; + +class HGraphBuilder : public ValueObject { + public: + HGraphBuilder(HGraph* graph, + const CodeItemDebugInfoAccessor& accessor, + const DexCompilationUnit* dex_compilation_unit, + const DexCompilationUnit* outer_compilation_unit, + CodeGenerator* code_generator, + OptimizingCompilerStats* compiler_stats, + ArrayRef interpreter_metadata, + VariableSizedHandleScope* handles); + + // Only for unit testing. 
+ HGraphBuilder(HGraph* graph, + const DexCompilationUnit* dex_compilation_unit, + const CodeItemDebugInfoAccessor& accessor, + VariableSizedHandleScope* handles, + DataType::Type return_type = DataType::Type::kInt32); + + GraphAnalysisResult BuildGraph(); + void BuildIntrinsicGraph(ArtMethod* method); + + static constexpr const char* kBuilderPassName = "builder"; + + private: + bool SkipCompilation(size_t number_of_branches); + + HGraph* const graph_; + const DexFile* const dex_file_; + const CodeItemDebugInfoAccessor code_item_accessor_; // null for intrinsic graph. + + // The compilation unit of the current method being compiled. Note that + // it can be an inlined method. + const DexCompilationUnit* const dex_compilation_unit_; + + // The compilation unit of the enclosing method being compiled. + const DexCompilationUnit* const outer_compilation_unit_; + + CodeGenerator* const code_generator_; + + OptimizingCompilerStats* const compilation_stats_; + const ArrayRef interpreter_metadata_; + VariableSizedHandleScope* const handles_; + const DataType::Type return_type_; + + DISALLOW_COPY_AND_ASSIGN(HGraphBuilder); +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_BUILDER_H_ diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc new file mode 100644 index 0000000..c6232ef --- /dev/null +++ b/compiler/optimizing/cha_guard_optimization.cc @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cha_guard_optimization.h" + +namespace art { + +// Note we can only do CHA guard elimination/motion in a single pass, since +// if a guard is not removed, another guard might be removed due to +// the existence of the first guard. The first guard should not be further +// removed in another pass. For example, due to further optimizations, +// a receiver of a guard might turn out to be a parameter value, or defined at +// a different site, which makes the guard removable as a result. However +// it's not safe to remove the guard in another pass since another guard might +// have been removed due to the existence of this guard. +// +// As a consequence, we decided not to rely on other passes to remove them +// (such as GVN or instruction simplifier). + +class CHAGuardVisitor : HGraphVisitor { + public: + explicit CHAGuardVisitor(HGraph* graph) + : HGraphVisitor(graph), + block_has_cha_guard_(GetGraph()->GetBlocks().size(), + 0, + graph->GetAllocator()->Adapter(kArenaAllocCHA)), + instruction_iterator_(nullptr) { + number_of_guards_to_visit_ = GetGraph()->GetNumberOfCHAGuards(); + DCHECK_NE(number_of_guards_to_visit_, 0u); + // Will recount number of guards during guard optimization. + GetGraph()->SetNumberOfCHAGuards(0); + } + + void VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) override; + + void VisitBasicBlock(HBasicBlock* block) override; + + private: + void RemoveGuard(HShouldDeoptimizeFlag* flag); + // Return true if `flag` is removed. + bool OptimizeForParameter(HShouldDeoptimizeFlag* flag, HInstruction* receiver); + // Return true if `flag` is removed. + bool OptimizeWithDominatingGuard(HShouldDeoptimizeFlag* flag, HInstruction* receiver); + // Return true if `flag` is hoisted. + bool HoistGuard(HShouldDeoptimizeFlag* flag, HInstruction* receiver); + + // Record if each block has any CHA guard. 
It's updated during the + // reverse post order visit. Use int instead of bool since ArenaVector + // does not support bool. + ArenaVector block_has_cha_guard_; + + // The iterator that's being used for this visitor. Need it to manually + // advance the iterator due to removing/moving more than one instruction. + HInstructionIterator* instruction_iterator_; + + // Used to short-circuit the pass when there is no more guards left to visit. + uint32_t number_of_guards_to_visit_; + + DISALLOW_COPY_AND_ASSIGN(CHAGuardVisitor); +}; + +void CHAGuardVisitor::VisitBasicBlock(HBasicBlock* block) { + if (number_of_guards_to_visit_ == 0) { + return; + } + // Skip phis, just iterate through instructions. + HInstructionIterator it(block->GetInstructions()); + instruction_iterator_ = ⁢ + for (; !it.Done(); it.Advance()) { + DCHECK(it.Current()->IsInBlock()); + it.Current()->Accept(this); + } +} + +void CHAGuardVisitor::RemoveGuard(HShouldDeoptimizeFlag* flag) { + HBasicBlock* block = flag->GetBlock(); + HInstruction* compare = flag->GetNext(); + DCHECK(compare->IsNotEqual()); + HInstruction* deopt = compare->GetNext(); + DCHECK(deopt->IsDeoptimize()); + + // Advance instruction iterator first before we remove the guard. + // We need to do it twice since we remove three instructions and the + // visitor is responsible for advancing it once. + instruction_iterator_->Advance(); + instruction_iterator_->Advance(); + block->RemoveInstruction(deopt); + block->RemoveInstruction(compare); + block->RemoveInstruction(flag); +} + +bool CHAGuardVisitor::OptimizeForParameter(HShouldDeoptimizeFlag* flag, + HInstruction* receiver) { + // If some compiled code is invalidated by CHA due to class loading, the + // compiled code will not be entered anymore. 
So the very fact that the + // compiled code is invoked guarantees that a parameter receiver conforms + // to all the CHA devirtualization assumptions made by the compiled code, + // since all parameter receivers pre-exist any (potential) invalidation of + // the compiled code. + // + // TODO: allow more cases such as a phi whose inputs are all parameters. + if (receiver->IsParameterValue()) { + RemoveGuard(flag); + return true; + } + return false; +} + +bool CHAGuardVisitor::OptimizeWithDominatingGuard(HShouldDeoptimizeFlag* flag, + HInstruction* receiver) { + // If there is another guard that dominates the current guard, and + // that guard is dominated by receiver's definition, then the current + // guard can be eliminated, since receiver must pre-exist that other + // guard, and passing that guard guarantees that receiver conforms to + // all the CHA devirtualization assumptions. + HBasicBlock* dominator = flag->GetBlock(); + HBasicBlock* receiver_def_block = receiver->GetBlock(); + + // Complexity of the following algorithm: + // We potentially need to traverse the full dominator chain to receiver_def_block, + // plus a (partial) linear search within one block for each guard. + // So the worst case for each guard is bounded by the size of the + // biggest block plus the depth of the dominating tree. + + while (dominator != receiver_def_block) { + if (block_has_cha_guard_[dominator->GetBlockId()] == 1) { + RemoveGuard(flag); + return true; + } + dominator = dominator->GetDominator(); + } + + // At this point dominator is the block where receiver is defined. + // We do a linear search within dominator to see if there is a guard after + // receiver's definition. + HInstruction* instruction; + if (dominator == flag->GetBlock()) { + // Flag and receiver are defined in the same block. Search backward from + // the current guard. + instruction = flag->GetPrevious(); + } else { + // Search backward from the last instruction of that dominator. 
+ instruction = dominator->GetLastInstruction(); + } + while (instruction != receiver) { + if (instruction == nullptr) { + // receiver must be defined in this block, we didn't find it + // in the instruction list, so it must be a Phi. + DCHECK(receiver->IsPhi()); + break; + } + if (instruction->IsShouldDeoptimizeFlag()) { + RemoveGuard(flag); + return true; + } + instruction = instruction->GetPrevious(); + } + return false; +} + +bool CHAGuardVisitor::HoistGuard(HShouldDeoptimizeFlag* flag, + HInstruction* receiver) { + // If receiver is loop invariant, we can hoist the guard out of the + // loop since passing a guard before entering the loop guarantees that + // receiver conforms to all the CHA devirtualization assumptions. + // We only hoist guards out of the inner loop since that offers most of the + // benefit and it might help remove other guards in the inner loop. + HBasicBlock* block = flag->GetBlock(); + HLoopInformation* loop_info = block->GetLoopInformation(); + if (loop_info != nullptr && + !loop_info->IsIrreducible() && + loop_info->IsDefinedOutOfTheLoop(receiver)) { + HInstruction* compare = flag->GetNext(); + DCHECK(compare->IsNotEqual()); + HInstruction* deopt = compare->GetNext(); + DCHECK(deopt->IsDeoptimize()); + + // Advance instruction iterator first before we move the guard. + // We need to do it twice since we move three instructions and the + // visitor is responsible for advancing it once. + instruction_iterator_->Advance(); + instruction_iterator_->Advance(); + + HBasicBlock* pre_header = loop_info->GetPreHeader(); + flag->MoveBefore(pre_header->GetLastInstruction()); + compare->MoveBefore(pre_header->GetLastInstruction()); + + block->RemoveInstruction(deopt); + HInstruction* suspend = loop_info->GetSuspendCheck(); + // Need a new deoptimize instruction that copies the environment + // of the suspend instruction for the loop. 
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize( + GetGraph()->GetAllocator(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc()); + pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction()); + deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment( + suspend->GetEnvironment(), loop_info->GetHeader()); + block_has_cha_guard_[pre_header->GetBlockId()] = 1; + GetGraph()->IncrementNumberOfCHAGuards(); + return true; + } + return false; +} + +void CHAGuardVisitor::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { + number_of_guards_to_visit_--; + HInstruction* receiver = flag->InputAt(0); + // Don't need the receiver anymore. + flag->RemoveInputAt(0); + if (receiver->IsNullCheck()) { + receiver = receiver->InputAt(0); + } + + if (OptimizeForParameter(flag, receiver)) { + DCHECK(!flag->IsInBlock()); + return; + } + if (OptimizeWithDominatingGuard(flag, receiver)) { + DCHECK(!flag->IsInBlock()); + return; + } + if (HoistGuard(flag, receiver)) { + DCHECK(flag->IsInBlock()); + return; + } + + // Need to keep the CHA guard in place. + block_has_cha_guard_[flag->GetBlock()->GetBlockId()] = 1; + GetGraph()->IncrementNumberOfCHAGuards(); +} + +bool CHAGuardOptimization::Run() { + if (graph_->GetNumberOfCHAGuards() == 0) { + return false; + } + CHAGuardVisitor visitor(graph_); + for (HBasicBlock* block : graph_->GetReversePostOrder()) { + visitor.VisitBasicBlock(block); + } + return true; +} + +} // namespace art diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h new file mode 100644 index 0000000..440d51a --- /dev/null +++ b/compiler/optimizing/cha_guard_optimization.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_CHA_GUARD_OPTIMIZATION_H_ +#define ART_COMPILER_OPTIMIZING_CHA_GUARD_OPTIMIZATION_H_ + +#include "optimization.h" + +namespace art { + +/** + * Optimize CHA guards by removing/moving them. + */ +class CHAGuardOptimization : public HOptimization { + public: + explicit CHAGuardOptimization(HGraph* graph, + const char* name = kCHAGuardOptimizationPassName) + : HOptimization(graph, name) {} + + bool Run() override; + + static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization"; + + private: + DISALLOW_COPY_AND_ASSIGN(CHAGuardOptimization); +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_CHA_GUARD_OPTIMIZATION_H_ diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc new file mode 100644 index 0000000..cfd9ea6 --- /dev/null +++ b/compiler/optimizing/code_generator.cc @@ -0,0 +1,1758 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "code_generator.h" + +#ifdef ART_ENABLE_CODEGEN_arm +#include "code_generator_arm_vixl.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 +#include "code_generator_arm64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 +#include "code_generator_x86.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 +#include "code_generator_x86_64.h" +#endif + +#include "base/bit_utils.h" +#include "base/bit_utils_iterator.h" +#include "base/casts.h" +#include "base/leb128.h" +#include "class_linker.h" +#include "compiled_method.h" +#include "dex/bytecode_utils.h" +#include "dex/code_item_accessors-inl.h" +#include "dex/verified_method.h" +#include "graph_visualizer.h" +#include "image.h" +#include "gc/space/image_space.h" +#include "intern_table.h" +#include "intrinsics.h" +#include "mirror/array-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/object_reference.h" +#include "mirror/reference.h" +#include "mirror/string.h" +#include "parallel_move_resolver.h" +#include "scoped_thread_state_change-inl.h" +#include "ssa_liveness_analysis.h" +#include "stack_map.h" +#include "stack_map_stream.h" +#include "string_builder_append.h" +#include "thread-current-inl.h" +#include "utils/assembler.h" + +namespace art { + +// Return whether a location is consistent with a type. 
+static bool CheckType(DataType::Type type, Location location) { + if (location.IsFpuRegister() + || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) { + return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64); + } else if (location.IsRegister() || + (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) { + return DataType::IsIntegralType(type) || (type == DataType::Type::kReference); + } else if (location.IsRegisterPair()) { + return type == DataType::Type::kInt64; + } else if (location.IsFpuRegisterPair()) { + return type == DataType::Type::kFloat64; + } else if (location.IsStackSlot()) { + return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64) + || (type == DataType::Type::kFloat32) + || (type == DataType::Type::kReference); + } else if (location.IsDoubleStackSlot()) { + return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64); + } else if (location.IsConstant()) { + if (location.GetConstant()->IsIntConstant()) { + return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64); + } else if (location.GetConstant()->IsNullConstant()) { + return type == DataType::Type::kReference; + } else if (location.GetConstant()->IsLongConstant()) { + return type == DataType::Type::kInt64; + } else if (location.GetConstant()->IsFloatConstant()) { + return type == DataType::Type::kFloat32; + } else { + return location.GetConstant()->IsDoubleConstant() + && (type == DataType::Type::kFloat64); + } + } else { + return location.IsInvalid() || (location.GetPolicy() == Location::kAny); + } +} + +// Check that a location summary is consistent with an instruction. 
+static bool CheckTypeConsistency(HInstruction* instruction) { + LocationSummary* locations = instruction->GetLocations(); + if (locations == nullptr) { + return true; + } + + if (locations->Out().IsUnallocated() + && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) { + DCHECK(CheckType(instruction->GetType(), locations->InAt(0))) + << instruction->GetType() + << " " << locations->InAt(0); + } else { + DCHECK(CheckType(instruction->GetType(), locations->Out())) + << instruction->GetType() + << " " << locations->Out(); + } + + HConstInputsRef inputs = instruction->GetInputs(); + for (size_t i = 0; i < inputs.size(); ++i) { + DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i))) + << inputs[i]->GetType() << " " << locations->InAt(i); + } + + HEnvironment* environment = instruction->GetEnvironment(); + for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) { + if (environment->GetInstructionAt(i) != nullptr) { + DataType::Type type = environment->GetInstructionAt(i)->GetType(); + DCHECK(CheckType(type, environment->GetLocationAt(i))) + << type << " " << environment->GetLocationAt(i); + } else { + DCHECK(environment->GetLocationAt(i).IsInvalid()) + << environment->GetLocationAt(i); + } + } + return true; +} + +class CodeGenerator::CodeGenerationData : public DeletableArenaObject { + public: + static std::unique_ptr Create(ArenaStack* arena_stack, + InstructionSet instruction_set) { + ScopedArenaAllocator allocator(arena_stack); + void* memory = allocator.Alloc(kArenaAllocCodeGenerator); + return std::unique_ptr( + ::new (memory) CodeGenerationData(std::move(allocator), instruction_set)); + } + + ScopedArenaAllocator* GetScopedAllocator() { + return &allocator_; + } + + void AddSlowPath(SlowPathCode* slow_path) { + slow_paths_.emplace_back(std::unique_ptr(slow_path)); + } + + ArrayRef> GetSlowPaths() const { + return ArrayRef>(slow_paths_); + } + + StackMapStream* GetStackMapStream() { return &stack_map_stream_; } + + void 
ReserveJitStringRoot(StringReference string_reference, Handle string) { + jit_string_roots_.Overwrite(string_reference, + reinterpret_cast64(string.GetReference())); + } + + uint64_t GetJitStringRootIndex(StringReference string_reference) const { + return jit_string_roots_.Get(string_reference); + } + + size_t GetNumberOfJitStringRoots() const { + return jit_string_roots_.size(); + } + + void ReserveJitClassRoot(TypeReference type_reference, Handle klass) { + jit_class_roots_.Overwrite(type_reference, reinterpret_cast64(klass.GetReference())); + } + + uint64_t GetJitClassRootIndex(TypeReference type_reference) const { + return jit_class_roots_.Get(type_reference); + } + + size_t GetNumberOfJitClassRoots() const { + return jit_class_roots_.size(); + } + + size_t GetNumberOfJitRoots() const { + return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots(); + } + + void EmitJitRoots(/*out*/std::vector>* roots) + REQUIRES_SHARED(Locks::mutator_lock_); + + private: + CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set) + : allocator_(std::move(allocator)), + stack_map_stream_(&allocator_, instruction_set), + slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)), + jit_string_roots_(StringReferenceValueComparator(), + allocator_.Adapter(kArenaAllocCodeGenerator)), + jit_class_roots_(TypeReferenceValueComparator(), + allocator_.Adapter(kArenaAllocCodeGenerator)) { + slow_paths_.reserve(kDefaultSlowPathsCapacity); + } + + static constexpr size_t kDefaultSlowPathsCapacity = 8; + + ScopedArenaAllocator allocator_; + StackMapStream stack_map_stream_; + ScopedArenaVector> slow_paths_; + + // Maps a StringReference (dex_file, string_index) to the index in the literal table. + // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots` + // will compute all the indices. + ScopedArenaSafeMap jit_string_roots_; + + // Maps a ClassReference (dex_file, type_index) to the index in the literal table. 
+ // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots` + // will compute all the indices. + ScopedArenaSafeMap jit_class_roots_; +}; + +void CodeGenerator::CodeGenerationData::EmitJitRoots( + /*out*/std::vector>* roots) { + DCHECK(roots->empty()); + roots->reserve(GetNumberOfJitRoots()); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + size_t index = 0; + for (auto& entry : jit_string_roots_) { + // Update the `roots` with the string, and replace the address temporarily + // stored to the index in the table. + uint64_t address = entry.second; + roots->emplace_back(reinterpret_cast*>(address)); + DCHECK(roots->back() != nullptr); + DCHECK(roots->back()->IsString()); + entry.second = index; + // Ensure the string is strongly interned. This is a requirement on how the JIT + // handles strings. b/32995596 + class_linker->GetInternTable()->InternStrong(roots->back()->AsString()); + ++index; + } + for (auto& entry : jit_class_roots_) { + // Update the `roots` with the class, and replace the address temporarily + // stored to the index in the table. 
+ uint64_t address = entry.second; + roots->emplace_back(reinterpret_cast*>(address)); + DCHECK(roots->back() != nullptr); + DCHECK(roots->back()->IsClass()); + entry.second = index; + ++index; + } +} + +ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() { + DCHECK(code_generation_data_ != nullptr); + return code_generation_data_->GetScopedAllocator(); +} + +StackMapStream* CodeGenerator::GetStackMapStream() { + DCHECK(code_generation_data_ != nullptr); + return code_generation_data_->GetStackMapStream(); +} + +void CodeGenerator::ReserveJitStringRoot(StringReference string_reference, + Handle string) { + DCHECK(code_generation_data_ != nullptr); + code_generation_data_->ReserveJitStringRoot(string_reference, string); +} + +uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) { + DCHECK(code_generation_data_ != nullptr); + return code_generation_data_->GetJitStringRootIndex(string_reference); +} + +void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle klass) { + DCHECK(code_generation_data_ != nullptr); + code_generation_data_->ReserveJitClassRoot(type_reference, klass); +} + +uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) { + DCHECK(code_generation_data_ != nullptr); + return code_generation_data_->GetJitClassRootIndex(type_reference); +} + +void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED, + const uint8_t* roots_data ATTRIBUTE_UNUSED) { + DCHECK(code_generation_data_ != nullptr); + DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u); + DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u); +} + +uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) { + return array_length->IsStringLength() + ? 
mirror::String::CountOffset().Uint32Value() + : mirror::Array::LengthOffset().Uint32Value(); +} + +uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) { + DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt()); + return array_get->IsStringCharAt() + ? mirror::String::ValueOffset().Uint32Value() + : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value(); +} + +bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const { + DCHECK_EQ((*block_order_)[current_block_index_], current); + return GetNextBlockToEmit() == FirstNonEmptyBlock(next); +} + +HBasicBlock* CodeGenerator::GetNextBlockToEmit() const { + for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) { + HBasicBlock* block = (*block_order_)[i]; + if (!block->IsSingleJump()) { + return block; + } + } + return nullptr; +} + +HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const { + while (block->IsSingleJump()) { + block = block->GetSuccessors()[0]; + } + return block; +} + +class DisassemblyScope { + public: + DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen) + : codegen_(codegen), instruction_(instruction), start_offset_(static_cast(-1)) { + if (codegen_.GetDisassemblyInformation() != nullptr) { + start_offset_ = codegen_.GetAssembler().CodeSize(); + } + } + + ~DisassemblyScope() { + // We avoid building this data when we know it will not be used. 
+ if (codegen_.GetDisassemblyInformation() != nullptr) { + codegen_.GetDisassemblyInformation()->AddInstructionInterval( + instruction_, start_offset_, codegen_.GetAssembler().CodeSize()); + } + } + + private: + const CodeGenerator& codegen_; + HInstruction* instruction_; + size_t start_offset_; +}; + + +void CodeGenerator::GenerateSlowPaths() { + DCHECK(code_generation_data_ != nullptr); + size_t code_start = 0; + for (const std::unique_ptr& slow_path_ptr : code_generation_data_->GetSlowPaths()) { + SlowPathCode* slow_path = slow_path_ptr.get(); + current_slow_path_ = slow_path; + if (disasm_info_ != nullptr) { + code_start = GetAssembler()->CodeSize(); + } + // Record the dex pc at start of slow path (required for java line number mapping). + MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path); + slow_path->EmitNativeCode(this); + if (disasm_info_ != nullptr) { + disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize()); + } + } + current_slow_path_ = nullptr; +} + +void CodeGenerator::InitializeCodeGenerationData() { + DCHECK(code_generation_data_ == nullptr); + code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet()); +} + +void CodeGenerator::Compile(CodeAllocator* allocator) { + InitializeCodeGenerationData(); + + // The register allocator already called `InitializeCodeGeneration`, + // where the frame size has been computed. + DCHECK(block_order_ != nullptr); + Initialize(); + + HGraphVisitor* instruction_visitor = GetInstructionVisitor(); + DCHECK_EQ(current_block_index_, 0u); + + GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 
0 : frame_size_, + core_spill_mask_, + fpu_spill_mask_, + GetGraph()->GetNumberOfVRegs(), + GetGraph()->IsCompilingBaseline()); + + size_t frame_start = GetAssembler()->CodeSize(); + GenerateFrameEntry(); + DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast(frame_size_)); + if (disasm_info_ != nullptr) { + disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize()); + } + + for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) { + HBasicBlock* block = (*block_order_)[current_block_index_]; + // Don't generate code for an empty block. Its predecessors will branch to its successor + // directly. Also, the label of that block will not be emitted, so this helps catch + // errors where we reference that label. + if (block->IsSingleJump()) continue; + Bind(block); + // This ensures that we have correct native line mapping for all native instructions. + // It is necessary to make stepping over a statement work. Otherwise, any initial + // instructions (e.g. moves) would be assumed to be the start of next statement. + MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc()); + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { + HInstruction* current = it.Current(); + if (current->HasEnvironment()) { + // Create stackmap for HNativeDebugInfo or any instruction which calls native code. + // Note that we need correct mapping for the native PC of the call instruction, + // so the runtime's stackmap is not sufficient since it is at PC after the call. + MaybeRecordNativeDebugInfo(current, block->GetDexPc()); + } + DisassemblyScope disassembly_scope(current, *this); + DCHECK(CheckTypeConsistency(current)); + current->Accept(instruction_visitor); + } + } + + GenerateSlowPaths(); + + // Emit catch stack maps at the end of the stack map stream as expected by the + // runtime exception handler. 
+ if (graph_->HasTryCatch()) { + RecordCatchBlockInfo(); + } + + // Finalize instructions in assember; + Finalize(allocator); + + GetStackMapStream()->EndMethod(); +} + +void CodeGenerator::Finalize(CodeAllocator* allocator) { + size_t code_size = GetAssembler()->CodeSize(); + uint8_t* buffer = allocator->Allocate(code_size); + + MemoryRegion code(buffer, code_size); + GetAssembler()->FinalizeInstructions(code); +} + +void CodeGenerator::EmitLinkerPatches( + ArenaVector* linker_patches ATTRIBUTE_UNUSED) { + // No linker patches by default. +} + +bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const { + // Code generators that create patches requiring thunk compilation should override this function. + return false; +} + +void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED, + /*out*/ ArenaVector* code ATTRIBUTE_UNUSED, + /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) { + // Code generators that create patches requiring thunk compilation should override this function. + LOG(FATAL) << "Unexpected call to EmitThunkCode()."; +} + +void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots, + size_t maximum_safepoint_spill_size, + size_t number_of_out_slots, + const ArenaVector& block_order) { + block_order_ = &block_order; + DCHECK(!block_order.empty()); + DCHECK(block_order[0] == GetGraph()->GetEntryBlock()); + ComputeSpillMask(); + first_register_slot_in_slow_path_ = RoundUp( + (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment()); + + if (number_of_spill_slots == 0 + && !HasAllocatedCalleeSaveRegisters() + && IsLeafMethod() + && !RequiresCurrentMethod()) { + DCHECK_EQ(maximum_safepoint_spill_size, 0u); + SetFrameSize(CallPushesPC() ? GetWordSize() : 0); + } else { + SetFrameSize(RoundUp( + first_register_slot_in_slow_path_ + + maximum_safepoint_spill_size + + (GetGraph()->HasShouldDeoptimizeFlag() ? 
kShouldDeoptimizeFlagSize : 0) + + FrameEntrySpillSize(), + kStackAlignment)); + } +} + +void CodeGenerator::CreateCommonInvokeLocationSummary( + HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) { + ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator(); + LocationSummary* locations = new (allocator) LocationSummary(invoke, + LocationSummary::kCallOnMainOnly); + + for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) { + HInstruction* input = invoke->InputAt(i); + locations->SetInAt(i, visitor->GetNextLocation(input->GetType())); + } + + locations->SetOut(visitor->GetReturnLocation(invoke->GetType())); + + if (invoke->IsInvokeStaticOrDirect()) { + HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect(); + switch (call->GetMethodLoadKind()) { + case HInvokeStaticOrDirect::MethodLoadKind::kRecursive: + locations->SetInAt(call->GetSpecialInputIndex(), visitor->GetMethodLocation()); + break; + case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: + locations->AddTemp(visitor->GetMethodLocation()); + locations->SetInAt(call->GetSpecialInputIndex(), Location::RequiresRegister()); + break; + default: + locations->AddTemp(visitor->GetMethodLocation()); + break; + } + } else if (!invoke->IsInvokePolymorphic()) { + locations->AddTemp(visitor->GetMethodLocation()); + } +} + +void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall( + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) { + MoveConstant(temp, invoke->GetDexMethodIndex()); + + // The access check is unnecessary but we do not want to introduce + // extra entrypoints for the codegens that do not support some + // invoke type and fall back to the runtime call. + + // Initialize to anything to silent compiler warnings. 
+ QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck; + switch (invoke->GetInvokeType()) { + case kStatic: + entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck; + break; + case kDirect: + entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck; + break; + case kSuper: + entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck; + break; + case kVirtual: + case kInterface: + case kPolymorphic: + case kCustom: + LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType(); + UNREACHABLE(); + } + + InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path); +} +void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) { + MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex()); + + // Initialize to anything to silent compiler warnings. + QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck; + switch (invoke->GetInvokeType()) { + case kStatic: + entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck; + break; + case kDirect: + entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck; + break; + case kVirtual: + entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck; + break; + case kSuper: + entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck; + break; + case kInterface: + entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck; + break; + case kPolymorphic: + case kCustom: + LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType(); + UNREACHABLE(); + } + InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr); +} + +void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) { + // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a + // method index) since it requires multiple info from the instruction (registers A, B, H). Not + // using the reservation has no effect on the registers used in the runtime call. 
+ QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic; + InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr); +} + +void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) { + MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex()); + QuickEntrypointEnum entrypoint = kQuickInvokeCustom; + InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr); +} + +void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction, + Location out) { + ArenaAllocator* allocator = GetGraph()->GetAllocator(); + LocationSummary* locations = + new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly); + locations->SetOut(out); + instruction->GetLocations()->SetInAt(instruction->FormatIndex(), + Location::ConstantLocation(instruction->GetFormat())); + + uint32_t format = static_cast(instruction->GetFormat()->GetValue()); + uint32_t f = format; + PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet()); + size_t stack_offset = static_cast(pointer_size); // Start after the ArtMethod*. 
+ for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) { + StringBuilderAppend::Argument arg_type = + static_cast(f & StringBuilderAppend::kArgMask); + switch (arg_type) { + case StringBuilderAppend::Argument::kStringBuilder: + case StringBuilderAppend::Argument::kString: + case StringBuilderAppend::Argument::kCharArray: + static_assert(sizeof(StackReference) == sizeof(uint32_t), "Size check."); + FALLTHROUGH_INTENDED; + case StringBuilderAppend::Argument::kBoolean: + case StringBuilderAppend::Argument::kChar: + case StringBuilderAppend::Argument::kInt: + case StringBuilderAppend::Argument::kFloat: + locations->SetInAt(i, Location::StackSlot(stack_offset)); + break; + case StringBuilderAppend::Argument::kLong: + case StringBuilderAppend::Argument::kDouble: + stack_offset = RoundUp(stack_offset, sizeof(uint64_t)); + locations->SetInAt(i, Location::DoubleStackSlot(stack_offset)); + // Skip the low word, let the common code skip the high word. + stack_offset += sizeof(uint32_t); + break; + default: + LOG(FATAL) << "Unexpected arg format: 0x" << std::hex + << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format; + UNREACHABLE(); + } + f >>= StringBuilderAppend::kBitsPerArg; + stack_offset += sizeof(uint32_t); + } + DCHECK_EQ(f, 0u); + + size_t param_size = stack_offset - static_cast(pointer_size); + DCHECK_ALIGNED(param_size, kVRegSize); + size_t num_vregs = param_size / kVRegSize; + graph_->UpdateMaximumNumberOfOutVRegs(num_vregs); +} + +void CodeGenerator::CreateUnresolvedFieldLocationSummary( + HInstruction* field_access, + DataType::Type field_type, + const FieldAccessCallingConvention& calling_convention) { + bool is_instance = field_access->IsUnresolvedInstanceFieldGet() + || field_access->IsUnresolvedInstanceFieldSet(); + bool is_get = field_access->IsUnresolvedInstanceFieldGet() + || field_access->IsUnresolvedStaticFieldGet(); + + ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator(); + 
LocationSummary* locations = + new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly); + + locations->AddTemp(calling_convention.GetFieldIndexLocation()); + + if (is_instance) { + // Add the `this` object for instance field accesses. + locations->SetInAt(0, calling_convention.GetObjectLocation()); + } + + // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64 + // regardless of the the type. Because of that we forced to special case + // the access to floating point values. + if (is_get) { + if (DataType::IsFloatingPointType(field_type)) { + // The return value will be stored in regular registers while register + // allocator expects it in a floating point register. + // Note We don't need to request additional temps because the return + // register(s) are already blocked due the call and they may overlap with + // the input or field index. + // The transfer between the two will be done at codegen level. + locations->SetOut(calling_convention.GetFpuLocation(field_type)); + } else { + locations->SetOut(calling_convention.GetReturnLocation(field_type)); + } + } else { + size_t set_index = is_instance ? 1 : 0; + if (DataType::IsFloatingPointType(field_type)) { + // The set value comes from a float location while the calling convention + // expects it in a regular register location. Allocate a temp for it and + // make the transfer at codegen. 
+ AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations); + locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type)); + } else { + locations->SetInAt(set_index, + calling_convention.GetSetValueLocation(field_type, is_instance)); + } + } +} + +void CodeGenerator::GenerateUnresolvedFieldAccess( + HInstruction* field_access, + DataType::Type field_type, + uint32_t field_index, + uint32_t dex_pc, + const FieldAccessCallingConvention& calling_convention) { + LocationSummary* locations = field_access->GetLocations(); + + MoveConstant(locations->GetTemp(0), field_index); + + bool is_instance = field_access->IsUnresolvedInstanceFieldGet() + || field_access->IsUnresolvedInstanceFieldSet(); + bool is_get = field_access->IsUnresolvedInstanceFieldGet() + || field_access->IsUnresolvedStaticFieldGet(); + + if (!is_get && DataType::IsFloatingPointType(field_type)) { + // Copy the float value to be set into the calling convention register. + // Note that using directly the temp location is problematic as we don't + // support temp register pairs. To avoid boilerplate conversion code, use + // the location from the calling convention. + MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance), + locations->InAt(is_instance ? 1 : 0), + (DataType::Is64BitType(field_type) ? DataType::Type::kInt64 + : DataType::Type::kInt32)); + } + + QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings. + switch (field_type) { + case DataType::Type::kBool: + entrypoint = is_instance + ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance) + : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static); + break; + case DataType::Type::kInt8: + entrypoint = is_instance + ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance) + : (is_get ? kQuickGetByteStatic : kQuickSet8Static); + break; + case DataType::Type::kInt16: + entrypoint = is_instance + ? (is_get ? 
kQuickGetShortInstance : kQuickSet16Instance) + : (is_get ? kQuickGetShortStatic : kQuickSet16Static); + break; + case DataType::Type::kUint16: + entrypoint = is_instance + ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance) + : (is_get ? kQuickGetCharStatic : kQuickSet16Static); + break; + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + entrypoint = is_instance + ? (is_get ? kQuickGet32Instance : kQuickSet32Instance) + : (is_get ? kQuickGet32Static : kQuickSet32Static); + break; + case DataType::Type::kReference: + entrypoint = is_instance + ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance) + : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic); + break; + case DataType::Type::kInt64: + case DataType::Type::kFloat64: + entrypoint = is_instance + ? (is_get ? kQuickGet64Instance : kQuickSet64Instance) + : (is_get ? kQuickGet64Static : kQuickSet64Static); + break; + default: + LOG(FATAL) << "Invalid type " << field_type; + } + InvokeRuntime(entrypoint, field_access, dex_pc, nullptr); + + if (is_get && DataType::IsFloatingPointType(field_type)) { + MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type); + } +} + +void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls, + Location runtime_type_index_location, + Location runtime_return_location) { + DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall); + DCHECK_EQ(cls->InputCount(), 1u); + LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary( + cls, LocationSummary::kCallOnMainOnly); + locations->SetInAt(0, Location::NoLocation()); + locations->AddTemp(runtime_type_index_location); + locations->SetOut(runtime_return_location); +} + +void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) { + DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall); + DCHECK(!cls->MustGenerateClinitCheck()); + LocationSummary* locations = cls->GetLocations(); + 
MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_); + if (cls->NeedsAccessCheck()) { + CheckEntrypointTypes(); + InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc()); + } else { + CheckEntrypointTypes(); + InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc()); + } +} + +void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary( + HLoadMethodHandle* method_handle, + Location runtime_proto_index_location, + Location runtime_return_location) { + DCHECK_EQ(method_handle->InputCount(), 1u); + LocationSummary* locations = + new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary( + method_handle, LocationSummary::kCallOnMainOnly); + locations->SetInAt(0, Location::NoLocation()); + locations->AddTemp(runtime_proto_index_location); + locations->SetOut(runtime_return_location); +} + +void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) { + LocationSummary* locations = method_handle->GetLocations(); + MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex()); + CheckEntrypointTypes(); + InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc()); +} + +void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary( + HLoadMethodType* method_type, + Location runtime_proto_index_location, + Location runtime_return_location) { + DCHECK_EQ(method_type->InputCount(), 1u); + LocationSummary* locations = + new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary( + method_type, LocationSummary::kCallOnMainOnly); + locations->SetInAt(0, Location::NoLocation()); + locations->AddTemp(runtime_proto_index_location); + locations->SetOut(runtime_return_location); +} + +void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) { + LocationSummary* locations = method_type->GetLocations(); + MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_); + CheckEntrypointTypes(); + 
InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc()); +} + +static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) { + Runtime* runtime = Runtime::Current(); + DCHECK(runtime->IsAotCompiler()); + const std::vector& boot_image_spaces = + runtime->GetHeap()->GetBootImageSpaces(); + // Check that the `object` is in the expected section of one of the boot image files. + DCHECK(std::any_of(boot_image_spaces.begin(), + boot_image_spaces.end(), + [object, section](gc::space::ImageSpace* space) { + uintptr_t begin = reinterpret_cast(space->Begin()); + uintptr_t offset = reinterpret_cast(object) - begin; + return space->GetImageHeader().GetImageSection(section).Contains(offset); + })); + uintptr_t begin = reinterpret_cast(boot_image_spaces.front()->Begin()); + uintptr_t offset = reinterpret_cast(object) - begin; + return dchecked_integral_cast(offset); +} + +// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable. +uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS { + DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo); + ObjPtr klass = load_class->GetClass().Get(); + DCHECK(klass != nullptr); + return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects); +} + +// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable. 
+uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS { + DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo); + ObjPtr string = load_string->GetString().Get(); + DCHECK(string != nullptr); + return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects); +} + +uint32_t CodeGenerator::GetBootImageOffset(HInvokeStaticOrDirect* invoke) { + DCHECK_EQ(invoke->GetMethodLoadKind(), HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo); + ArtMethod* method = invoke->GetResolvedMethod(); + DCHECK(method != nullptr); + return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods); +} + +void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const { + // The DCHECKS below check that a register is not specified twice in + // the summary. The out location can overlap with an input, so we need + // to special case it. + if (location.IsRegister()) { + DCHECK(is_out || !blocked_core_registers_[location.reg()]); + blocked_core_registers_[location.reg()] = true; + } else if (location.IsFpuRegister()) { + DCHECK(is_out || !blocked_fpu_registers_[location.reg()]); + blocked_fpu_registers_[location.reg()] = true; + } else if (location.IsFpuRegisterPair()) { + DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow()]); + blocked_fpu_registers_[location.AsFpuRegisterPairLow()] = true; + DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh()]); + blocked_fpu_registers_[location.AsFpuRegisterPairHigh()] = true; + } else if (location.IsRegisterPair()) { + DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow()]); + blocked_core_registers_[location.AsRegisterPairLow()] = true; + DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh()]); + blocked_core_registers_[location.AsRegisterPairHigh()] = true; + } +} + +void CodeGenerator::AllocateLocations(HInstruction* instruction) { + for (HEnvironment* env = 
instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) { + env->AllocateLocations(); + } + instruction->Accept(GetLocationBuilder()); + DCHECK(CheckTypeConsistency(instruction)); + LocationSummary* locations = instruction->GetLocations(); + if (!instruction->IsSuspendCheckEntry()) { + if (locations != nullptr) { + if (locations->CanCall()) { + MarkNotLeaf(); + } else if (locations->Intrinsified() && + instruction->IsInvokeStaticOrDirect() && + !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) { + // A static method call that has been fully intrinsified, and cannot call on the slow + // path or refer to the current method directly, no longer needs current method. + return; + } + } + if (instruction->NeedsCurrentMethod()) { + SetRequiresCurrentMethod(); + } + } +} + +std::unique_ptr CodeGenerator::Create(HGraph* graph, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats) { + ArenaAllocator* allocator = graph->GetAllocator(); + switch (compiler_options.GetInstructionSet()) { +#ifdef ART_ENABLE_CODEGEN_arm + case InstructionSet::kArm: + case InstructionSet::kThumb2: { + return std::unique_ptr( + new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats)); + } +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 + case InstructionSet::kArm64: { + return std::unique_ptr( + new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats)); + } +#endif +#ifdef ART_ENABLE_CODEGEN_x86 + case InstructionSet::kX86: { + return std::unique_ptr( + new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats)); + } +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 + case InstructionSet::kX86_64: { + return std::unique_ptr( + new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats)); + } +#endif + default: + return nullptr; + } +} + +CodeGenerator::CodeGenerator(HGraph* graph, + size_t number_of_core_registers, + size_t number_of_fpu_registers, + size_t number_of_register_pairs, + uint32_t 
core_callee_save_mask, + uint32_t fpu_callee_save_mask, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats) + : frame_size_(0), + core_spill_mask_(0), + fpu_spill_mask_(0), + first_register_slot_in_slow_path_(0), + allocated_registers_(RegisterSet::Empty()), + blocked_core_registers_(graph->GetAllocator()->AllocArray(number_of_core_registers, + kArenaAllocCodeGenerator)), + blocked_fpu_registers_(graph->GetAllocator()->AllocArray(number_of_fpu_registers, + kArenaAllocCodeGenerator)), + number_of_core_registers_(number_of_core_registers), + number_of_fpu_registers_(number_of_fpu_registers), + number_of_register_pairs_(number_of_register_pairs), + core_callee_save_mask_(core_callee_save_mask), + fpu_callee_save_mask_(fpu_callee_save_mask), + block_order_(nullptr), + disasm_info_(nullptr), + stats_(stats), + graph_(graph), + compiler_options_(compiler_options), + current_slow_path_(nullptr), + current_block_index_(0), + is_leaf_(true), + requires_current_method_(false), + code_generation_data_() { + if (GetGraph()->IsCompilingOsr()) { + // Make OSR methods have all registers spilled, this simplifies the logic of + // jumping to the compiled code directly. 
+ for (size_t i = 0; i < number_of_core_registers_; ++i) { + if (IsCoreCalleeSaveRegister(i)) { + AddAllocatedRegister(Location::RegisterLocation(i)); + } + } + for (size_t i = 0; i < number_of_fpu_registers_; ++i) { + if (IsFloatingPointCalleeSaveRegister(i)) { + AddAllocatedRegister(Location::FpuRegisterLocation(i)); + } + } + } +} + +CodeGenerator::~CodeGenerator() {} + +size_t CodeGenerator::GetNumberOfJitRoots() const { + DCHECK(code_generation_data_ != nullptr); + return code_generation_data_->GetNumberOfJitRoots(); +} + +static void CheckCovers(uint32_t dex_pc, + const HGraph& graph, + const CodeInfo& code_info, + const ArenaVector& loop_headers, + ArenaVector* covered) { + for (size_t i = 0; i < loop_headers.size(); ++i) { + if (loop_headers[i]->GetDexPc() == dex_pc) { + if (graph.IsCompilingOsr()) { + DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid()); + } + ++(*covered)[i]; + } + } +} + +// Debug helper to ensure loop entries in compiled code are matched by +// dex branch instructions. +static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph, + const CodeInfo& code_info, + const dex::CodeItem& code_item) { + if (graph.HasTryCatch()) { + // One can write loops through try/catch, which we do not support for OSR anyway. 
+ return; + } + ArenaVector loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc)); + for (HBasicBlock* block : graph.GetReversePostOrder()) { + if (block->IsLoopHeader()) { + HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck(); + if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) { + loop_headers.push_back(suspend_check); + } + } + } + ArenaVector covered( + loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc)); + for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(), + &code_item)) { + const uint32_t dex_pc = pair.DexPc(); + const Instruction& instruction = pair.Inst(); + if (instruction.IsBranch()) { + uint32_t target = dex_pc + instruction.GetTargetOffset(); + CheckCovers(target, graph, code_info, loop_headers, &covered); + } else if (instruction.IsSwitch()) { + DexSwitchTable table(instruction, dex_pc); + uint16_t num_entries = table.GetNumEntries(); + size_t offset = table.GetFirstValueIndex(); + + // Use a larger loop counter type to avoid overflow issues. + for (size_t i = 0; i < num_entries; ++i) { + // The target of the case. + uint32_t target = dex_pc + table.GetEntryAt(i + offset); + CheckCovers(target, graph, code_info, loop_headers, &covered); + } + } + } + + for (size_t i = 0; i < covered.size(); ++i) { + DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent"; + } +} + +ScopedArenaVector CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) { + ScopedArenaVector stack_map = GetStackMapStream()->Encode(); + if (kIsDebugBuild && code_item != nullptr) { + CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item); + } + return stack_map; +} + +// Returns whether stackmap dex register info is needed for the instruction. +// +// The following cases mandate having a dex register map: +// * Deoptimization +// when we need to obtain the values to restore actual vregisters for interpreter. 
+// * Debuggability +// when we want to observe the values / asynchronously deoptimize. +// * Monitor operations +// to allow dumping in a stack trace locked dex registers for non-debuggable code. +// * On-stack-replacement (OSR) +// when entering compiled for OSR code from the interpreter we need to initialize the compiled +// code values with the values from the vregisters. +// * Method local catch blocks +// a catch block must see the environment of the instruction from the same method that can +// throw to this block. +static bool NeedsVregInfo(HInstruction* instruction, bool osr) { + HGraph* graph = instruction->GetBlock()->GetGraph(); + return instruction->IsDeoptimize() || + graph->IsDebuggable() || + graph->HasMonitorOperations() || + osr || + instruction->CanThrowIntoCatchBlock(); +} + +void CodeGenerator::RecordPcInfo(HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path, + bool native_debug_info) { + RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info); +} + +void CodeGenerator::RecordPcInfo(HInstruction* instruction, + uint32_t dex_pc, + uint32_t native_pc, + SlowPathCode* slow_path, + bool native_debug_info) { + if (instruction != nullptr) { + // The code generated for some type conversions + // may call the runtime, thus normally requiring a subsequent + // call to this method. However, the method verifier does not + // produce PC information for certain instructions, which are + // considered "atomic" (they cannot join a GC). + // Therefore we do not currently record PC information for such + // instructions. As this may change later, we added this special + // case so that code generators may nevertheless call + // CodeGenerator::RecordPcInfo without triggering an error in + // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x") + // thereafter. 
+ if (instruction->IsTypeConversion()) { + return; + } + if (instruction->IsRem()) { + DataType::Type type = instruction->AsRem()->GetResultType(); + if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) { + return; + } + } + } + + StackMapStream* stack_map_stream = GetStackMapStream(); + if (instruction == nullptr) { + // For stack overflow checks and native-debug-info entries without dex register + // mapping (i.e. start of basic block or start of slow path). + stack_map_stream->BeginStackMapEntry(dex_pc, native_pc); + stack_map_stream->EndStackMapEntry(); + return; + } + + LocationSummary* locations = instruction->GetLocations(); + uint32_t register_mask = locations->GetRegisterMask(); + DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u); + if (locations->OnlyCallsOnSlowPath()) { + // In case of slow path, we currently set the location of caller-save registers + // to register (instead of their stack location when pushed before the slow-path + // call). Therefore register_mask contains both callee-save and caller-save + // registers that hold objects. We must remove the spilled caller-save from the + // mask, since they will be overwritten by the callee. + uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true); + register_mask &= ~spills; + } else { + // The register mask must be a subset of callee-save registers. 
+ DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask); + } + + uint32_t outer_dex_pc = dex_pc; + uint32_t outer_environment_size = 0u; + uint32_t inlining_depth = 0; + HEnvironment* const environment = instruction->GetEnvironment(); + if (environment != nullptr) { + HEnvironment* outer_environment = environment; + while (outer_environment->GetParent() != nullptr) { + outer_environment = outer_environment->GetParent(); + ++inlining_depth; + } + outer_dex_pc = outer_environment->GetDexPc(); + outer_environment_size = outer_environment->Size(); + } + + HLoopInformation* info = instruction->GetBlock()->GetLoopInformation(); + bool osr = + instruction->IsSuspendCheck() && + (info != nullptr) && + graph_->IsCompilingOsr() && + (inlining_depth == 0); + StackMap::Kind kind = native_debug_info + ? StackMap::Kind::Debug + : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default); + bool needs_vreg_info = NeedsVregInfo(instruction, osr); + stack_map_stream->BeginStackMapEntry(outer_dex_pc, + native_pc, + register_mask, + locations->GetStackMask(), + kind, + needs_vreg_info); + + EmitEnvironment(environment, slow_path, needs_vreg_info); + stack_map_stream->EndStackMapEntry(); + + if (osr) { + DCHECK_EQ(info->GetSuspendCheck(), instruction); + DCHECK(info->IsIrreducible()); + DCHECK(environment != nullptr); + if (kIsDebugBuild) { + for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) { + HInstruction* in_environment = environment->GetInstructionAt(i); + if (in_environment != nullptr) { + DCHECK(in_environment->IsPhi() || in_environment->IsConstant()); + Location location = environment->GetLocationAt(i); + DCHECK(location.IsStackSlot() || + location.IsDoubleStackSlot() || + location.IsConstant() || + location.IsInvalid()); + if (location.IsStackSlot() || location.IsDoubleStackSlot()) { + DCHECK_LT(location.GetStackIndex(), static_cast(GetFrameSize())); + } + } + } + } + } +} + +bool CodeGenerator::HasStackMapAtCurrentPc() { + uint32_t 
pc = GetAssembler()->CodeSize(); + StackMapStream* stack_map_stream = GetStackMapStream(); + size_t count = stack_map_stream->GetNumberOfStackMaps(); + if (count == 0) { + return false; + } + return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc; +} + +void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path) { + if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) { + if (HasStackMapAtCurrentPc()) { + // Ensure that we do not collide with the stack map of the previous instruction. + GenerateNop(); + } + RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true); + } +} + +void CodeGenerator::RecordCatchBlockInfo() { + StackMapStream* stack_map_stream = GetStackMapStream(); + + for (HBasicBlock* block : *block_order_) { + if (!block->IsCatchBlock()) { + continue; + } + + uint32_t dex_pc = block->GetDexPc(); + uint32_t num_vregs = graph_->GetNumberOfVRegs(); + uint32_t native_pc = GetAddressOf(block); + + stack_map_stream->BeginStackMapEntry(dex_pc, + native_pc, + /* register_mask= */ 0, + /* sp_mask= */ nullptr, + StackMap::Kind::Catch); + + HInstruction* current_phi = block->GetFirstPhi(); + for (size_t vreg = 0; vreg < num_vregs; ++vreg) { + while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) { + HInstruction* next_phi = current_phi->GetNext(); + DCHECK(next_phi == nullptr || + current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber()) + << "Phis need to be sorted by vreg number to keep this a linear-time loop."; + current_phi = next_phi; + } + + if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) { + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0); + } else { + Location location = current_phi->GetLocations()->Out(); + switch (location.GetKind()) { + case Location::kStackSlot: { + stack_map_stream->AddDexRegisterEntry( + DexRegisterLocation::Kind::kInStack, 
location.GetStackIndex()); + break; + } + case Location::kDoubleStackSlot: { + stack_map_stream->AddDexRegisterEntry( + DexRegisterLocation::Kind::kInStack, location.GetStackIndex()); + stack_map_stream->AddDexRegisterEntry( + DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize)); + ++vreg; + DCHECK_LT(vreg, num_vregs); + break; + } + default: { + // All catch phis must be allocated to a stack slot. + LOG(FATAL) << "Unexpected kind " << location.GetKind(); + UNREACHABLE(); + } + } + } + } + + stack_map_stream->EndStackMapEntry(); + } +} + +void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) { + DCHECK(code_generation_data_ != nullptr); + code_generation_data_->AddSlowPath(slow_path); +} + +void CodeGenerator::EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path) { + StackMapStream* stack_map_stream = GetStackMapStream(); + // Walk over the environment, and record the location of dex registers. + for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) { + HInstruction* current = environment->GetInstructionAt(i); + if (current == nullptr) { + stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0); + continue; + } + + using Kind = DexRegisterLocation::Kind; + Location location = environment->GetLocationAt(i); + switch (location.GetKind()) { + case Location::kConstant: { + DCHECK_EQ(current, location.GetConstant()); + if (current->IsLongConstant()) { + int64_t value = current->AsLongConstant()->GetValue(); + stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value)); + stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value)); + ++i; + DCHECK_LT(i, environment_size); + } else if (current->IsDoubleConstant()) { + int64_t value = bit_cast(current->AsDoubleConstant()->GetValue()); + stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value)); + stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value)); + ++i; + DCHECK_LT(i, 
environment_size); + } else if (current->IsIntConstant()) { + int32_t value = current->AsIntConstant()->GetValue(); + stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value); + } else if (current->IsNullConstant()) { + stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0); + } else { + DCHECK(current->IsFloatConstant()) << current->DebugName(); + int32_t value = bit_cast(current->AsFloatConstant()->GetValue()); + stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value); + } + break; + } + + case Location::kStackSlot: { + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex()); + break; + } + + case Location::kDoubleStackSlot: { + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex()); + stack_map_stream->AddDexRegisterEntry( + Kind::kInStack, location.GetHighStackIndex(kVRegSize)); + ++i; + DCHECK_LT(i, environment_size); + break; + } + + case Location::kRegister : { + int id = location.reg(); + if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) { + uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id); + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset); + if (current->GetType() == DataType::Type::kInt64) { + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize); + ++i; + DCHECK_LT(i, environment_size); + } + } else { + stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id); + if (current->GetType() == DataType::Type::kInt64) { + stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id); + ++i; + DCHECK_LT(i, environment_size); + } + } + break; + } + + case Location::kFpuRegister : { + int id = location.reg(); + if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) { + uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id); + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset); + if (current->GetType() == DataType::Type::kFloat64) { + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize); + 
++i; + DCHECK_LT(i, environment_size); + } + } else { + stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id); + if (current->GetType() == DataType::Type::kFloat64) { + stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id); + ++i; + DCHECK_LT(i, environment_size); + } + } + break; + } + + case Location::kFpuRegisterPair : { + int low = location.low(); + int high = location.high(); + if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) { + uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low); + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset); + } else { + stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low); + } + if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) { + uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high); + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset); + ++i; + } else { + stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high); + ++i; + } + DCHECK_LT(i, environment_size); + break; + } + + case Location::kRegisterPair : { + int low = location.low(); + int high = location.high(); + if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) { + uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low); + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset); + } else { + stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low); + } + if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) { + uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high); + stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset); + } else { + stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high); + } + ++i; + DCHECK_LT(i, environment_size); + break; + } + + case Location::kInvalid: { + stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0); + break; + } + + default: + LOG(FATAL) << "Unexpected kind " << location.GetKind(); + } + } +} + +void CodeGenerator::EmitEnvironment(HEnvironment* environment, + 
SlowPathCode* slow_path, + bool needs_vreg_info) { + if (environment == nullptr) return; + + StackMapStream* stack_map_stream = GetStackMapStream(); + bool emit_inline_info = environment->GetParent() != nullptr; + + if (emit_inline_info) { + // We emit the parent environment first. + EmitEnvironment(environment->GetParent(), slow_path, needs_vreg_info); + stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(), + environment->GetDexPc(), + needs_vreg_info ? environment->Size() : 0, + &graph_->GetDexFile()); + } + + if (needs_vreg_info) { + // If a dex register map is not required we just won't emit it. + EmitVRegInfo(environment, slow_path); + } + + if (emit_inline_info) { + stack_map_stream->EndInlineInfoEntry(); + } +} + +bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) { + return null_check->IsEmittedAtUseSite(); +} + +void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) { + HNullCheck* null_check = instr->GetImplicitNullCheck(); + if (null_check != nullptr) { + RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition()); + } +} + +LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction, + RegisterSet caller_saves) { + // Note: Using kNoCall allows the method to be treated as leaf (and eliminate the + // HSuspendCheck from entry block). However, it will still get a valid stack frame + // because the HNullCheck needs an environment. + LocationSummary::CallKind call_kind = LocationSummary::kNoCall; + // When throwing from a try block, we may need to retrieve dalvik registers from + // physical registers and we also need to set up stack mask for GC. This is + // implicitly achieved by passing kCallOnSlowPath to the LocationSummary. 
+ bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock(); + if (can_throw_into_catch_block) { + call_kind = LocationSummary::kCallOnSlowPath; + } + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); + if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) { + locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers. + } + DCHECK(!instruction->HasUses()); + return locations; +} + +void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) { + if (compiler_options_.GetImplicitNullChecks()) { + MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated); + GenerateImplicitNullCheck(instruction); + } else { + MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated); + GenerateExplicitNullCheck(instruction); + } +} + +void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check, + HParallelMove* spills) const { + LocationSummary* locations = suspend_check->GetLocations(); + HBasicBlock* block = suspend_check->GetBlock(); + DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check); + DCHECK(block->IsLoopHeader()); + DCHECK(block->GetFirstInstruction() == spills); + + for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) { + Location dest = spills->MoveOperandsAt(i)->GetDestination(); + // All parallel moves in loop headers are spills. + DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest; + // Clear the stack bit marking a reference. Do not bother to check if the spill is + // actually a reference spill, clearing bits that are already zero is harmless. 
+ locations->ClearStackBit(dest.GetStackIndex() / kVRegSize); + } +} + +void CodeGenerator::EmitParallelMoves(Location from1, + Location to1, + DataType::Type type1, + Location from2, + Location to2, + DataType::Type type2) { + HParallelMove parallel_move(GetGraph()->GetAllocator()); + parallel_move.AddMove(from1, to1, type1, nullptr); + parallel_move.AddMove(from2, to2, type2, nullptr); + GetMoveResolver()->EmitNativeCode(¶llel_move); +} + +void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + SlowPathCode* slow_path) { + // Ensure that the call kind indication given to the register allocator is + // coherent with the runtime call generated. + if (slow_path == nullptr) { + DCHECK(instruction->GetLocations()->WillCall()) + << "instruction->DebugName()=" << instruction->DebugName(); + } else { + DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal()) + << "instruction->DebugName()=" << instruction->DebugName() + << " slow_path->GetDescription()=" << slow_path->GetDescription(); + } + + // Check that the GC side effect is set when required. + // TODO: Reverse EntrypointCanTriggerGC + if (EntrypointCanTriggerGC(entrypoint)) { + if (slow_path == nullptr) { + DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC())) + << "instruction->DebugName()=" << instruction->DebugName() + << " instruction->GetSideEffects().ToString()=" + << instruction->GetSideEffects().ToString(); + } else { + // 'CanTriggerGC' side effect is used to restrict optimization of instructions which depend + // on GC (e.g. IntermediateAddress) - to ensure they are not alive across GC points. However + // if execution never returns to the compiled code from a GC point this restriction is + // unnecessary - in particular for fatal slow paths which might trigger GC. 
+ DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) || + instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) || + // When (non-Baker) read barriers are enabled, some instructions + // use a slow path to emit a read barrier, which does not trigger + // GC. + (kEmitCompilerReadBarrier && + !kUseBakerReadBarrier && + (instruction->IsInstanceFieldGet() || + instruction->IsStaticFieldGet() || + instruction->IsArrayGet() || + instruction->IsLoadClass() || + instruction->IsLoadString() || + instruction->IsInstanceOf() || + instruction->IsCheckCast() || + (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified())))) + << "instruction->DebugName()=" << instruction->DebugName() + << " instruction->GetSideEffects().ToString()=" + << instruction->GetSideEffects().ToString() + << " slow_path->GetDescription()=" << slow_path->GetDescription(); + } + } else { + // The GC side effect is not required for the instruction. But the instruction might still have + // it, for example if it calls other entrypoints requiring it. + } + + // Check the coherency of leaf information. + DCHECK(instruction->IsSuspendCheck() + || ((slow_path != nullptr) && slow_path->IsFatal()) + || instruction->GetLocations()->CanCall() + || !IsLeafMethod()) + << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : ""); +} + +void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction, + SlowPathCode* slow_path) { + DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath()) + << "instruction->DebugName()=" << instruction->DebugName() + << " slow_path->GetDescription()=" << slow_path->GetDescription(); + // Only the Baker read barrier marking slow path used by certains + // instructions is expected to invoke the runtime without recording + // PC-related information. 
+ DCHECK(kUseBakerReadBarrier); + DCHECK(instruction->IsInstanceFieldGet() || + instruction->IsStaticFieldGet() || + instruction->IsArrayGet() || + instruction->IsArraySet() || + instruction->IsLoadClass() || + instruction->IsLoadString() || + instruction->IsInstanceOf() || + instruction->IsCheckCast() || + (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) || + (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified())) + << "instruction->DebugName()=" << instruction->DebugName() + << " slow_path->GetDescription()=" << slow_path->GetDescription(); +} + +void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); + + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); + for (uint32_t i : LowToHighBits(core_spills)) { + // If the register holds an object, update the stack mask. + if (locations->RegisterContainsObject(i)) { + locations->SetStackBit(stack_offset / kVRegSize); + } + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_core_stack_offsets_[i] = stack_offset; + stack_offset += codegen->SaveCoreRegister(stack_offset, i); + } + + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); + for (uint32_t i : LowToHighBits(fp_spills)) { + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_fpu_stack_offsets_[i] = stack_offset; + stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i); + } +} + +void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); + + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= 
*/ true); + for (uint32_t i : LowToHighBits(core_spills)) { + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + stack_offset += codegen->RestoreCoreRegister(stack_offset, i); + } + + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); + for (uint32_t i : LowToHighBits(fp_spills)) { + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i); + } +} + +void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) { + // Check to see if we have known failures that will cause us to have to bail out + // to the runtime, and just generate the runtime call directly. + HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant(); + HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant(); + + // The positions must be non-negative. + if ((src_pos != nullptr && src_pos->GetValue() < 0) || + (dest_pos != nullptr && dest_pos->GetValue() < 0)) { + // We will have to fail anyways. + return; + } + + // The length must be >= 0. + HIntConstant* length = invoke->InputAt(4)->AsIntConstant(); + if (length != nullptr) { + int32_t len = length->GetValue(); + if (len < 0) { + // Just call as normal. + return; + } + } + + SystemArrayCopyOptimizations optimizations(invoke); + + if (optimizations.GetDestinationIsSource()) { + if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) { + // We only support backward copying if source and destination are the same. + return; + } + } + + if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) { + // We currently don't intrinsify primitive copying. 
+ return;
+ }
+
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations = new (allocator) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
+ locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
+
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void CodeGenerator::EmitJitRoots(uint8_t* code,
+ const uint8_t* roots_data,
+ /*out*/std::vector<Handle<mirror::Object>>* roots) {
+ code_generation_data_->EmitJitRoots(roots);
+ EmitJitRootPatches(code, roots_data);
+}
+
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
+ switch (new_array->GetComponentSizeShift()) {
+ case 0: return kQuickAllocArrayResolved8;
+ case 1: return kQuickAllocArrayResolved16;
+ case 2: return kQuickAllocArrayResolved32;
+ case 3: return kQuickAllocArrayResolved64;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+} // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
new file mode 100644
index 0000000..9e3e454
--- /dev/null
+++ b/compiler/optimizing/code_generator.h
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_ +#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_ + +#include "arch/instruction_set.h" +#include "arch/instruction_set_features.h" +#include "base/arena_containers.h" +#include "base/arena_object.h" +#include "base/array_ref.h" +#include "base/bit_field.h" +#include "base/bit_utils.h" +#include "base/enums.h" +#include "base/globals.h" +#include "base/memory_region.h" +#include "dex/string_reference.h" +#include "dex/type_reference.h" +#include "graph_visualizer.h" +#include "locations.h" +#include "nodes.h" +#include "optimizing_compiler_stats.h" +#include "read_barrier_option.h" +#include "stack.h" +#include "utils/label.h" + +namespace art { + +// Binary encoding of 2^32 for type double. +static int64_t constexpr k2Pow32EncodingForDouble = INT64_C(0x41F0000000000000); +// Binary encoding of 2^31 for type double. +static int64_t constexpr k2Pow31EncodingForDouble = INT64_C(0x41E0000000000000); + +// Minimum value for a primitive integer. +static int32_t constexpr kPrimIntMin = 0x80000000; +// Minimum value for a primitive long. +static int64_t constexpr kPrimLongMin = INT64_C(0x8000000000000000); + +// Maximum value for a primitive integer. +static int32_t constexpr kPrimIntMax = 0x7fffffff; +// Maximum value for a primitive long. +static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff); + +static constexpr ReadBarrierOption kCompilerReadBarrierOption = + kEmitCompilerReadBarrier ? 
kWithReadBarrier : kWithoutReadBarrier;
+
+class Assembler;
+class CodeGenerator;
+class CompilerOptions;
+class StackMapStream;
+class ParallelMoveResolver;
+
+namespace linker {
+class LinkerPatch;
+} // namespace linker
+
+class CodeAllocator {
+ public:
+ CodeAllocator() {}
+ virtual ~CodeAllocator() {}
+
+ virtual uint8_t* Allocate(size_t size) = 0;
+ virtual ArrayRef<const uint8_t> GetMemory() const = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
+};
+
+class SlowPathCode : public DeletableArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
+ for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
+ saved_core_stack_offsets_[i] = kRegisterNotSaved;
+ saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
+ }
+ }
+
+ virtual ~SlowPathCode() {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
+
+ // Save live core and floating-point caller-save registers and
+ // update the stack mask in `locations` for registers holding object
+ // references.
+ virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+ // Restore live core and floating-point caller-save registers.
+ virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations); + + bool IsCoreRegisterSaved(int reg) const { + return saved_core_stack_offsets_[reg] != kRegisterNotSaved; + } + + bool IsFpuRegisterSaved(int reg) const { + return saved_fpu_stack_offsets_[reg] != kRegisterNotSaved; + } + + uint32_t GetStackOffsetOfCoreRegister(int reg) const { + return saved_core_stack_offsets_[reg]; + } + + uint32_t GetStackOffsetOfFpuRegister(int reg) const { + return saved_fpu_stack_offsets_[reg]; + } + + virtual bool IsFatal() const { return false; } + + virtual const char* GetDescription() const = 0; + + Label* GetEntryLabel() { return &entry_label_; } + Label* GetExitLabel() { return &exit_label_; } + + HInstruction* GetInstruction() const { + return instruction_; + } + + uint32_t GetDexPc() const { + return instruction_ != nullptr ? instruction_->GetDexPc() : kNoDexPc; + } + + protected: + static constexpr size_t kMaximumNumberOfExpectedRegisters = 32; + static constexpr uint32_t kRegisterNotSaved = -1; + // The instruction where this slow path is happening. + HInstruction* instruction_; + uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters]; + uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters]; + + private: + Label entry_label_; + Label exit_label_; + + DISALLOW_COPY_AND_ASSIGN(SlowPathCode); +}; + +class InvokeDexCallingConventionVisitor { + public: + virtual Location GetNextLocation(DataType::Type type) = 0; + virtual Location GetReturnLocation(DataType::Type type) const = 0; + virtual Location GetMethodLocation() const = 0; + + protected: + InvokeDexCallingConventionVisitor() {} + virtual ~InvokeDexCallingConventionVisitor() {} + + // The current index for core registers. + uint32_t gp_index_ = 0u; + // The current index for floating-point registers. + uint32_t float_index_ = 0u; + // The current stack index. 
+ uint32_t stack_index_ = 0u;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
+class FieldAccessCallingConvention {
+ public:
+ virtual Location GetObjectLocation() const = 0;
+ virtual Location GetFieldIndexLocation() const = 0;
+ virtual Location GetReturnLocation(DataType::Type type) const = 0;
+ virtual Location GetSetValueLocation(DataType::Type type, bool is_instance) const = 0;
+ virtual Location GetFpuLocation(DataType::Type type) const = 0;
+ virtual ~FieldAccessCallingConvention() {}
+
+ protected:
+ FieldAccessCallingConvention() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
+};
+
+class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ // Compiles the graph to executable instructions.
+ void Compile(CodeAllocator* allocator);
+ static std::unique_ptr<CodeGenerator> Create(HGraph* graph,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
+ virtual ~CodeGenerator();
+
+ // Get the graph. This is the outermost graph, never the graph of a method being inlined.
+ HGraph* GetGraph() const { return graph_; }
+
+ HBasicBlock* GetNextBlockToEmit() const;
+ HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
+ bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
+
+ size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
+ // Note that this follows the current calling convention.
+ return GetFrameSize()
+ + static_cast<size_t>(InstructionSetPointerSize(GetInstructionSet())) // Art method
+ + parameter->GetIndex() * kVRegSize;
+ }
+
+ virtual void Initialize() = 0;
+ virtual void Finalize(CodeAllocator* allocator);
+ virtual void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches);
+ virtual bool NeedsThunkCode(const linker::LinkerPatch& patch) const;
+ virtual void EmitThunkCode(const linker::LinkerPatch& patch,
+ /*out*/ ArenaVector<uint8_t>* code,
+ /*out*/ std::string* debug_name);
+ virtual void GenerateFrameEntry() = 0;
+ virtual void GenerateFrameExit() = 0;
+ virtual void Bind(HBasicBlock* block) = 0;
+ virtual void MoveConstant(Location destination, int32_t value) = 0;
+ virtual void MoveLocation(Location dst, Location src, DataType::Type dst_type) = 0;
+ virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;
+
+ virtual Assembler* GetAssembler() = 0;
+ virtual const Assembler& GetAssembler() const = 0;
+ virtual size_t GetWordSize() const = 0;
+
+ // Get FP register width in bytes for spilling/restoring in the slow paths.
+ //
+ // Note: In SIMD graphs this should return SIMD register width as all FP and SIMD registers
+ // alias and live SIMD registers are forced to be spilled in full size in the slow paths.
+ virtual size_t GetSlowPathFPWidth() const {
+ // Default implementation.
+ return GetCalleePreservedFPWidth();
+ }
+
+ // Get FP register width required to be preserved by the target ABI.
+ virtual size_t GetCalleePreservedFPWidth() const = 0;
+
+ virtual uintptr_t GetAddressOf(HBasicBlock* block) = 0;
+ void InitializeCodeGeneration(size_t number_of_spill_slots,
+ size_t maximum_safepoint_spill_size,
+ size_t number_of_out_slots,
+ const ArenaVector<HBasicBlock*>& block_order);
+ // Backends can override this as necessary. For most, no special alignment is required.
+ virtual uint32_t GetPreferredSlotsAlignment() const { return 1; } + + uint32_t GetFrameSize() const { return frame_size_; } + void SetFrameSize(uint32_t size) { frame_size_ = size; } + uint32_t GetCoreSpillMask() const { return core_spill_mask_; } + uint32_t GetFpuSpillMask() const { return fpu_spill_mask_; } + + size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; } + size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; } + virtual void SetupBlockedRegisters() const = 0; + + virtual void ComputeSpillMask() { + core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_; + DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved"; + fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_; + } + + static uint32_t ComputeRegisterMask(const int* registers, size_t length) { + uint32_t mask = 0; + for (size_t i = 0, e = length; i < e; ++i) { + mask |= (1 << registers[i]); + } + return mask; + } + + virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0; + virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0; + virtual InstructionSet GetInstructionSet() const = 0; + + const CompilerOptions& GetCompilerOptions() const { return compiler_options_; } + + // Saves the register in the stack. Returns the size taken on stack. + virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0; + // Restores the register from the stack. Returns the size taken on stack. + virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0; + + virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0; + virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0; + + virtual bool NeedsTwoRegisters(DataType::Type type) const = 0; + // Returns whether we should split long moves in parallel moves. 
+ virtual bool ShouldSplitLongMoves() const { return false; } + + size_t GetNumberOfCoreCalleeSaveRegisters() const { + return POPCOUNT(core_callee_save_mask_); + } + + size_t GetNumberOfCoreCallerSaveRegisters() const { + DCHECK_GE(GetNumberOfCoreRegisters(), GetNumberOfCoreCalleeSaveRegisters()); + return GetNumberOfCoreRegisters() - GetNumberOfCoreCalleeSaveRegisters(); + } + + bool IsCoreCalleeSaveRegister(int reg) const { + return (core_callee_save_mask_ & (1 << reg)) != 0; + } + + bool IsFloatingPointCalleeSaveRegister(int reg) const { + return (fpu_callee_save_mask_ & (1 << reg)) != 0; + } + + uint32_t GetSlowPathSpills(LocationSummary* locations, bool core_registers) const { + DCHECK(locations->OnlyCallsOnSlowPath() || + (locations->Intrinsified() && locations->CallsOnMainAndSlowPath() && + !locations->HasCustomSlowPathCallingConvention())); + uint32_t live_registers = core_registers + ? locations->GetLiveRegisters()->GetCoreRegisters() + : locations->GetLiveRegisters()->GetFloatingPointRegisters(); + if (locations->HasCustomSlowPathCallingConvention()) { + // Save only the live registers that the custom calling convention wants us to save. + uint32_t caller_saves = core_registers + ? locations->GetCustomSlowPathCallerSaves().GetCoreRegisters() + : locations->GetCustomSlowPathCallerSaves().GetFloatingPointRegisters(); + return live_registers & caller_saves; + } else { + // Default ABI, we need to spill non-callee-save live registers. + uint32_t callee_saves = core_registers ? 
core_callee_save_mask_ : fpu_callee_save_mask_; + return live_registers & ~callee_saves; + } + } + + size_t GetNumberOfSlowPathSpills(LocationSummary* locations, bool core_registers) const { + return POPCOUNT(GetSlowPathSpills(locations, core_registers)); + } + + size_t GetStackOffsetOfShouldDeoptimizeFlag() const { + DCHECK(GetGraph()->HasShouldDeoptimizeFlag()); + DCHECK_GE(GetFrameSize(), FrameEntrySpillSize() + kShouldDeoptimizeFlagSize); + return GetFrameSize() - FrameEntrySpillSize() - kShouldDeoptimizeFlagSize; + } + + // Record native to dex mapping for a suspend point. Required by runtime. + void RecordPcInfo(HInstruction* instruction, + uint32_t dex_pc, + uint32_t native_pc, + SlowPathCode* slow_path = nullptr, + bool native_debug_info = false); + + // Record native to dex mapping for a suspend point. + // The native_pc is used from Assembler::CodePosition. + // + // Note: As Assembler::CodePosition is target dependent, it does not guarantee the exact native_pc + // for the instruction. If the exact native_pc is required it must be provided explicitly. + void RecordPcInfo(HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path = nullptr, + bool native_debug_info = false); + + // Check whether we have already recorded mapping at this PC. + bool HasStackMapAtCurrentPc(); + + // Record extra stack maps if we support native debugging. + // + // ARM specific behaviour: The recorded native PC might be a branch over pools to instructions + // corresponding the dex PC. 
+ void MaybeRecordNativeDebugInfo(HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path = nullptr); + + bool CanMoveNullCheckToUser(HNullCheck* null_check); + virtual void MaybeRecordImplicitNullCheck(HInstruction* instruction); + LocationSummary* CreateThrowingSlowPathLocations( + HInstruction* instruction, RegisterSet caller_saves = RegisterSet::Empty()); + void GenerateNullCheck(HNullCheck* null_check); + virtual void GenerateImplicitNullCheck(HNullCheck* null_check) = 0; + virtual void GenerateExplicitNullCheck(HNullCheck* null_check) = 0; + + // Records a stack map which the runtime might use to set catch phi values + // during exception delivery. + // TODO: Replace with a catch-entering instruction that records the environment. + void RecordCatchBlockInfo(); + + // Get the ScopedArenaAllocator used for codegen memory allocation. + ScopedArenaAllocator* GetScopedAllocator(); + + void AddSlowPath(SlowPathCode* slow_path); + + ScopedArenaVector BuildStackMaps(const dex::CodeItem* code_item_for_osr_check); + size_t GetNumberOfJitRoots() const; + + // Fills the `literals` array with literals collected during code generation. + // Also emits literal patches. + void EmitJitRoots(uint8_t* code, + const uint8_t* roots_data, + /*out*/std::vector>* roots) + REQUIRES_SHARED(Locks::mutator_lock_); + + bool IsLeafMethod() const { + return is_leaf_; + } + + void MarkNotLeaf() { + is_leaf_ = false; + requires_current_method_ = true; + } + + void SetRequiresCurrentMethod() { + requires_current_method_ = true; + } + + bool RequiresCurrentMethod() const { + return requires_current_method_; + } + + // Clears the spill slots taken by loop phis in the `LocationSummary` of the + // suspend check. This is called when the code generator generates code + // for the suspend check at the back edge (instead of where the suspend check + // is, which is the loop entry). At this point, the spill slots for the phis + // have not been written to. 
+ void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check, + HParallelMove* spills) const; + + bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; } + bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; } + + bool IsBlockedCoreRegister(size_t i) { return blocked_core_registers_[i]; } + bool IsBlockedFloatingPointRegister(size_t i) { return blocked_fpu_registers_[i]; } + + // Helper that returns the offset of the array's length field. + // Note: Besides the normal arrays, we also use the HArrayLength for + // accessing the String's `count` field in String intrinsics. + static uint32_t GetArrayLengthOffset(HArrayLength* array_length); + + // Helper that returns the offset of the array's data. + // Note: Besides the normal arrays, we also use the HArrayGet for + // accessing the String's `value` field in String intrinsics. + static uint32_t GetArrayDataOffset(HArrayGet* array_get); + + void EmitParallelMoves(Location from1, + Location to1, + DataType::Type type1, + Location from2, + Location to2, + DataType::Type type2); + + static bool InstanceOfNeedsReadBarrier(HInstanceOf* instance_of) { + // Used only for kExactCheck, kAbstractClassCheck, kClassHierarchyCheck and kArrayObjectCheck. + DCHECK(instance_of->GetTypeCheckKind() == TypeCheckKind::kExactCheck || + instance_of->GetTypeCheckKind() == TypeCheckKind::kAbstractClassCheck || + instance_of->GetTypeCheckKind() == TypeCheckKind::kClassHierarchyCheck || + instance_of->GetTypeCheckKind() == TypeCheckKind::kArrayObjectCheck) + << instance_of->GetTypeCheckKind(); + // If the target class is in the boot image, it's non-moveable and it doesn't matter + // if we compare it with a from-space or to-space reference, the result is the same. + // It's OK to traverse a class hierarchy jumping between from-space and to-space. 
+ return kEmitCompilerReadBarrier && !instance_of->GetTargetClass()->IsInBootImage(); + } + + static ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) { + return InstanceOfNeedsReadBarrier(instance_of) ? kWithReadBarrier : kWithoutReadBarrier; + } + + static bool IsTypeCheckSlowPathFatal(HCheckCast* check_cast) { + switch (check_cast->GetTypeCheckKind()) { + case TypeCheckKind::kExactCheck: + case TypeCheckKind::kAbstractClassCheck: + case TypeCheckKind::kClassHierarchyCheck: + case TypeCheckKind::kArrayObjectCheck: + case TypeCheckKind::kInterfaceCheck: { + bool needs_read_barrier = + kEmitCompilerReadBarrier && !check_cast->GetTargetClass()->IsInBootImage(); + // We do not emit read barriers for HCheckCast, so we can get false negatives + // and the slow path shall re-check and simply return if the cast is actually OK. + return !needs_read_barrier; + } + case TypeCheckKind::kArrayCheck: + case TypeCheckKind::kUnresolvedCheck: + return false; + case TypeCheckKind::kBitstringCheck: + return true; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); + } + + static LocationSummary::CallKind GetCheckCastCallKind(HCheckCast* check_cast) { + return (IsTypeCheckSlowPathFatal(check_cast) && !check_cast->CanThrowIntoCatchBlock()) + ? LocationSummary::kNoCall // In fact, call on a fatal (non-returning) slow path. + : LocationSummary::kCallOnSlowPath; + } + + static bool StoreNeedsWriteBarrier(DataType::Type type, HInstruction* value) { + // Check that null value is not represented as an integer constant. + DCHECK(type != DataType::Type::kReference || !value->IsIntConstant()); + return type == DataType::Type::kReference && !value->IsNullConstant(); + } + + + // Performs checks pertaining to an InvokeRuntime call. + void ValidateInvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + SlowPathCode* slow_path); + + // Performs checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call. 
+ static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction, + SlowPathCode* slow_path); + + void AddAllocatedRegister(Location location) { + allocated_registers_.Add(location); + } + + bool HasAllocatedRegister(bool is_core, int reg) const { + return is_core + ? allocated_registers_.ContainsCoreRegister(reg) + : allocated_registers_.ContainsFloatingPointRegister(reg); + } + + void AllocateLocations(HInstruction* instruction); + + // Tells whether the stack frame of the compiled method is + // considered "empty", that is either actually having a size of zero, + // or just containing the saved return address register. + bool HasEmptyFrame() const { + return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0); + } + + static int8_t GetInt8ValueOf(HConstant* constant) { + DCHECK(constant->IsIntConstant()); + return constant->AsIntConstant()->GetValue(); + } + + static int16_t GetInt16ValueOf(HConstant* constant) { + DCHECK(constant->IsIntConstant()); + return constant->AsIntConstant()->GetValue(); + } + + static int32_t GetInt32ValueOf(HConstant* constant) { + if (constant->IsIntConstant()) { + return constant->AsIntConstant()->GetValue(); + } else if (constant->IsNullConstant()) { + return 0; + } else { + DCHECK(constant->IsFloatConstant()); + return bit_cast(constant->AsFloatConstant()->GetValue()); + } + } + + static int64_t GetInt64ValueOf(HConstant* constant) { + if (constant->IsIntConstant()) { + return constant->AsIntConstant()->GetValue(); + } else if (constant->IsNullConstant()) { + return 0; + } else if (constant->IsFloatConstant()) { + return bit_cast(constant->AsFloatConstant()->GetValue()); + } else if (constant->IsLongConstant()) { + return constant->AsLongConstant()->GetValue(); + } else { + DCHECK(constant->IsDoubleConstant()); + return bit_cast(constant->AsDoubleConstant()->GetValue()); + } + } + + size_t GetFirstRegisterSlotInSlowPath() const { + return first_register_slot_in_slow_path_; + } + + uint32_t 
FrameEntrySpillSize() const { + return GetFpuSpillSize() + GetCoreSpillSize(); + } + + virtual ParallelMoveResolver* GetMoveResolver() = 0; + + static void CreateCommonInvokeLocationSummary( + HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor); + + void GenerateInvokeStaticOrDirectRuntimeCall( + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path); + + void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke); + + void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke); + + void GenerateInvokeCustomCall(HInvokeCustom* invoke); + + void CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction, Location out); + + void CreateUnresolvedFieldLocationSummary( + HInstruction* field_access, + DataType::Type field_type, + const FieldAccessCallingConvention& calling_convention); + + void GenerateUnresolvedFieldAccess( + HInstruction* field_access, + DataType::Type field_type, + uint32_t field_index, + uint32_t dex_pc, + const FieldAccessCallingConvention& calling_convention); + + static void CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls, + Location runtime_type_index_location, + Location runtime_return_location); + void GenerateLoadClassRuntimeCall(HLoadClass* cls); + + static void CreateLoadMethodHandleRuntimeCallLocationSummary(HLoadMethodHandle* method_handle, + Location runtime_handle_index_location, + Location runtime_return_location); + void GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle); + + static void CreateLoadMethodTypeRuntimeCallLocationSummary(HLoadMethodType* method_type, + Location runtime_type_index_location, + Location runtime_return_location); + void GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type); + + uint32_t GetBootImageOffset(HLoadClass* load_class); + uint32_t GetBootImageOffset(HLoadString* load_string); + uint32_t GetBootImageOffset(HInvokeStaticOrDirect* invoke); + + static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke); + + void 
SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; } + DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; } + + virtual void InvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path = nullptr) = 0; + + // Check if the desired_string_load_kind is supported. If it is, return it, + // otherwise return a fall-back kind that should be used instead. + virtual HLoadString::LoadKind GetSupportedLoadStringKind( + HLoadString::LoadKind desired_string_load_kind) = 0; + + // Check if the desired_class_load_kind is supported. If it is, return it, + // otherwise return a fall-back kind that should be used instead. + virtual HLoadClass::LoadKind GetSupportedLoadClassKind( + HLoadClass::LoadKind desired_class_load_kind) = 0; + + static LocationSummary::CallKind GetLoadStringCallKind(HLoadString* load) { + switch (load->GetLoadKind()) { + case HLoadString::LoadKind::kBssEntry: + DCHECK(load->NeedsEnvironment()); + return LocationSummary::kCallOnSlowPath; + case HLoadString::LoadKind::kRuntimeCall: + DCHECK(load->NeedsEnvironment()); + return LocationSummary::kCallOnMainOnly; + case HLoadString::LoadKind::kJitTableAddress: + DCHECK(!load->NeedsEnvironment()); + return kEmitCompilerReadBarrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall; + break; + default: + DCHECK(!load->NeedsEnvironment()); + return LocationSummary::kNoCall; + } + } + + // Check if the desired_dispatch_info is supported. If it is, return it, + // otherwise return a fall-back info that should be used instead. + virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( + const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, + ArtMethod* method) = 0; + + // Generate a call to a static or direct method. 
+ virtual void GenerateStaticOrDirectCall( + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) = 0; + // Generate a call to a virtual method. + virtual void GenerateVirtualCall( + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) = 0; + + // Copy the result of a call into the given target. + virtual void MoveFromReturnRegister(Location trg, DataType::Type type) = 0; + + virtual void GenerateNop() = 0; + + static QuickEntrypointEnum GetArrayAllocationEntrypoint(HNewArray* new_array); + + protected: + // Patch info used for recording locations of required linker patches and their targets, + // i.e. target method, string, type or code identified by their dex file and index, + // or .data.bimg.rel.ro entries identified by the boot image offset. + template + struct PatchInfo { + PatchInfo(const DexFile* dex_file, uint32_t off_or_idx) + : target_dex_file(dex_file), offset_or_index(off_or_idx), label() { } + + // Target dex file or null for .data.bmig.rel.ro patches. + const DexFile* target_dex_file; + // Either the boot image offset (to write to .data.bmig.rel.ro) or string/type/method index. + uint32_t offset_or_index; + // Label for the instruction to patch. + LabelType label; + }; + + CodeGenerator(HGraph* graph, + size_t number_of_core_registers, + size_t number_of_fpu_registers, + size_t number_of_register_pairs, + uint32_t core_callee_save_mask, + uint32_t fpu_callee_save_mask, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats); + + virtual HGraphVisitor* GetLocationBuilder() = 0; + virtual HGraphVisitor* GetInstructionVisitor() = 0; + + // Returns the location of the first spilled entry for floating point registers, + // relative to the stack pointer. 
+ uint32_t GetFpuSpillStart() const { + return GetFrameSize() - FrameEntrySpillSize(); + } + + uint32_t GetFpuSpillSize() const { + return POPCOUNT(fpu_spill_mask_) * GetCalleePreservedFPWidth(); + } + + uint32_t GetCoreSpillSize() const { + return POPCOUNT(core_spill_mask_) * GetWordSize(); + } + + virtual bool HasAllocatedCalleeSaveRegisters() const { + // We check the core registers against 1 because it always comprises the return PC. + return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1) + || (POPCOUNT(allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_) != 0); + } + + bool CallPushesPC() const { + InstructionSet instruction_set = GetInstructionSet(); + return instruction_set == InstructionSet::kX86 || instruction_set == InstructionSet::kX86_64; + } + + // Arm64 has its own type for a label, so we need to templatize these methods + // to share the logic. + + template + LabelType* CommonInitializeLabels() { + // We use raw array allocations instead of ArenaVector<> because Labels are + // non-constructible and non-movable and as such cannot be held in a vector. 
+ size_t size = GetGraph()->GetBlocks().size(); + LabelType* labels = + GetGraph()->GetAllocator()->AllocArray(size, kArenaAllocCodeGenerator); + for (size_t i = 0; i != size; ++i) { + new(labels + i) LabelType(); + } + return labels; + } + + template + LabelType* CommonGetLabelOf(LabelType* raw_pointer_to_labels_array, HBasicBlock* block) const { + block = FirstNonEmptyBlock(block); + return raw_pointer_to_labels_array + block->GetBlockId(); + } + + SlowPathCode* GetCurrentSlowPath() { + return current_slow_path_; + } + + StackMapStream* GetStackMapStream(); + + void ReserveJitStringRoot(StringReference string_reference, Handle string); + uint64_t GetJitStringRootIndex(StringReference string_reference); + void ReserveJitClassRoot(TypeReference type_reference, Handle klass); + uint64_t GetJitClassRootIndex(TypeReference type_reference); + + // Emit the patches assocatied with JIT roots. Only applies to JIT compiled code. + virtual void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data); + + // Frame size required for this method. + uint32_t frame_size_; + uint32_t core_spill_mask_; + uint32_t fpu_spill_mask_; + uint32_t first_register_slot_in_slow_path_; + + // Registers that were allocated during linear scan. + RegisterSet allocated_registers_; + + // Arrays used when doing register allocation to know which + // registers we can allocate. `SetupBlockedRegisters` updates the + // arrays. + bool* const blocked_core_registers_; + bool* const blocked_fpu_registers_; + size_t number_of_core_registers_; + size_t number_of_fpu_registers_; + size_t number_of_register_pairs_; + const uint32_t core_callee_save_mask_; + const uint32_t fpu_callee_save_mask_; + + // The order to use for code generation. 
+ const ArenaVector* block_order_; + + DisassemblyInformation* disasm_info_; + + private: + class CodeGenerationData; + + void InitializeCodeGenerationData(); + size_t GetStackOffsetOfSavedRegister(size_t index); + void GenerateSlowPaths(); + void BlockIfInRegister(Location location, bool is_out = false) const; + void EmitEnvironment(HEnvironment* environment, + SlowPathCode* slow_path, + bool needs_vreg_info = true); + void EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path); + + OptimizingCompilerStats* stats_; + + HGraph* const graph_; + const CompilerOptions& compiler_options_; + + // The current slow-path that we're generating code for. + SlowPathCode* current_slow_path_; + + // The current block index in `block_order_` of the block + // we are generating code for. + size_t current_block_index_; + + // Whether the method is a leaf method. + bool is_leaf_; + + // Whether an instruction in the graph accesses the current method. + // TODO: Rename: this actually indicates that some instruction in the method + // needs the environment including a valid stack frame. + bool requires_current_method_; + + // The CodeGenerationData contains a ScopedArenaAllocator intended for reusing the + // ArenaStack memory allocated in previous passes instead of adding to the memory + // held by the ArenaAllocator. This ScopedArenaAllocator is created in + // CodeGenerator::Compile() and remains alive until the CodeGenerator is destroyed. 
+ std::unique_ptr code_generation_data_; + + friend class OptimizingCFITest; + ART_FRIEND_TEST(CodegenTest, ARM64FrameSizeSIMD); + ART_FRIEND_TEST(CodegenTest, ARM64FrameSizeNoSIMD); + + DISALLOW_COPY_AND_ASSIGN(CodeGenerator); +}; + +template +class CallingConvention { + public: + CallingConvention(const C* registers, + size_t number_of_registers, + const F* fpu_registers, + size_t number_of_fpu_registers, + PointerSize pointer_size) + : registers_(registers), + number_of_registers_(number_of_registers), + fpu_registers_(fpu_registers), + number_of_fpu_registers_(number_of_fpu_registers), + pointer_size_(pointer_size) {} + + size_t GetNumberOfRegisters() const { return number_of_registers_; } + size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; } + + C GetRegisterAt(size_t index) const { + DCHECK_LT(index, number_of_registers_); + return registers_[index]; + } + + F GetFpuRegisterAt(size_t index) const { + DCHECK_LT(index, number_of_fpu_registers_); + return fpu_registers_[index]; + } + + size_t GetStackOffsetOf(size_t index) const { + // We still reserve the space for parameters passed by registers. + // Add space for the method pointer. + return static_cast(pointer_size_) + index * kVRegSize; + } + + private: + const C* registers_; + const size_t number_of_registers_; + const F* fpu_registers_; + const size_t number_of_fpu_registers_; + const PointerSize pointer_size_; + + DISALLOW_COPY_AND_ASSIGN(CallingConvention); +}; + +/** + * A templated class SlowPathGenerator with a templated method NewSlowPath() + * that can be used by any code generator to share equivalent slow-paths with + * the objective of reducing generated code size. 
+ * + * InstructionType: instruction that requires SlowPathCodeType + * SlowPathCodeType: subclass of SlowPathCode, with constructor SlowPathCodeType(InstructionType *) + */ +template +class SlowPathGenerator { + static_assert(std::is_base_of::value, + "InstructionType is not a subclass of art::HInstruction"); + + public: + SlowPathGenerator(HGraph* graph, CodeGenerator* codegen) + : graph_(graph), + codegen_(codegen), + slow_path_map_(std::less(), + graph->GetAllocator()->Adapter(kArenaAllocSlowPaths)) {} + + // Creates and adds a new slow-path, if needed, or returns existing one otherwise. + // Templating the method (rather than the whole class) on the slow-path type enables + // keeping this code at a generic, non architecture-specific place. + // + // NOTE: This approach assumes each InstructionType only generates one SlowPathCodeType. + // To relax this requirement, we would need some RTTI on the stored slow-paths, + // or template the class as a whole on SlowPathType. + template + SlowPathCodeType* NewSlowPath(InstructionType* instruction) { + static_assert(std::is_base_of::value, + "SlowPathCodeType is not a subclass of art::SlowPathCode"); + static_assert(std::is_constructible::value, + "SlowPathCodeType is not constructible from InstructionType*"); + // Iterate over potential candidates for sharing. Currently, only same-typed + // slow-paths with exactly the same dex-pc are viable candidates. + // TODO: pass dex-pc/slow-path-type to run-time to allow even more sharing? + const uint32_t dex_pc = instruction->GetDexPc(); + auto iter = slow_path_map_.find(dex_pc); + if (iter != slow_path_map_.end()) { + const ArenaVector>& candidates = iter->second; + for (const auto& it : candidates) { + InstructionType* other_instruction = it.first; + SlowPathCodeType* other_slow_path = down_cast(it.second); + // Determine if the instructions allow for slow-path sharing. 
+ if (HaveSameLiveRegisters(instruction, other_instruction) && + HaveSameStackMap(instruction, other_instruction)) { + // Can share: reuse existing one. + return other_slow_path; + } + } + } else { + // First time this dex-pc is seen. + iter = slow_path_map_.Put(dex_pc, + {{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}}); + } + // Cannot share: create and add new slow-path for this particular dex-pc. + SlowPathCodeType* slow_path = + new (codegen_->GetScopedAllocator()) SlowPathCodeType(instruction); + iter->second.emplace_back(std::make_pair(instruction, slow_path)); + codegen_->AddSlowPath(slow_path); + return slow_path; + } + + private: + // Tests if both instructions have same set of live physical registers. This ensures + // the slow-path has exactly the same preamble on saving these registers to stack. + bool HaveSameLiveRegisters(const InstructionType* i1, const InstructionType* i2) const { + const uint32_t core_spill = ~codegen_->GetCoreSpillMask(); + const uint32_t fpu_spill = ~codegen_->GetFpuSpillMask(); + RegisterSet* live1 = i1->GetLocations()->GetLiveRegisters(); + RegisterSet* live2 = i2->GetLocations()->GetLiveRegisters(); + return (((live1->GetCoreRegisters() & core_spill) == + (live2->GetCoreRegisters() & core_spill)) && + ((live1->GetFloatingPointRegisters() & fpu_spill) == + (live2->GetFloatingPointRegisters() & fpu_spill))); + } + + // Tests if both instructions have the same stack map. This ensures the interpreter + // will find exactly the same dex-registers at the same entries. + bool HaveSameStackMap(const InstructionType* i1, const InstructionType* i2) const { + DCHECK(i1->HasEnvironment()); + DCHECK(i2->HasEnvironment()); + // We conservatively test if the two instructions find exactly the same instructions + // and location in each dex-register. This guarantees they will have the same stack map. 
+ HEnvironment* e1 = i1->GetEnvironment(); + HEnvironment* e2 = i2->GetEnvironment(); + if (e1->GetParent() != e2->GetParent() || e1->Size() != e2->Size()) { + return false; + } + for (size_t i = 0, sz = e1->Size(); i < sz; ++i) { + if (e1->GetInstructionAt(i) != e2->GetInstructionAt(i) || + !e1->GetLocationAt(i).Equals(e2->GetLocationAt(i))) { + return false; + } + } + return true; + } + + HGraph* const graph_; + CodeGenerator* const codegen_; + + // Map from dex-pc to vector of already existing instruction/slow-path pairs. + ArenaSafeMap>> slow_path_map_; + + DISALLOW_COPY_AND_ASSIGN(SlowPathGenerator); +}; + +class InstructionCodeGenerator : public HGraphVisitor { + public: + InstructionCodeGenerator(HGraph* graph, CodeGenerator* codegen) + : HGraphVisitor(graph), + deopt_slow_paths_(graph, codegen) {} + + protected: + // Add slow-path generator for each instruction/slow-path combination that desires sharing. + // TODO: under current regime, only deopt sharing make sense; extend later. + SlowPathGenerator deopt_slow_paths_; +}; + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_ diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc new file mode 100644 index 0000000..7d1b0ea --- /dev/null +++ b/compiler/optimizing/code_generator_arm64.cc @@ -0,0 +1,6575 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "code_generator_arm64.h" + +#include "arch/arm64/asm_support_arm64.h" +#include "arch/arm64/instruction_set_features_arm64.h" +#include "art_method-inl.h" +#include "base/bit_utils.h" +#include "base/bit_utils_iterator.h" +#include "class_table.h" +#include "code_generator_utils.h" +#include "compiled_method.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints_enum.h" +#include "gc/accounting/card_table.h" +#include "gc/space/image_space.h" +#include "heap_poisoning.h" +#include "intrinsics.h" +#include "intrinsics_arm64.h" +#include "linker/linker_patch.h" +#include "lock_word.h" +#include "mirror/array-inl.h" +#include "mirror/class-inl.h" +#include "offsets.h" +#include "thread.h" +#include "utils/arm64/assembler_arm64.h" +#include "utils/assembler.h" +#include "utils/stack_checks.h" + +using namespace vixl::aarch64; // NOLINT(build/namespaces) +using vixl::ExactAssemblyScope; +using vixl::CodeBufferCheckScope; +using vixl::EmissionCheckScope; + +#ifdef __ +#error "ARM64 Codegen VIXL macro-assembler macro already defined." 
+#endif + +namespace art { + +template +class GcRoot; + +namespace arm64 { + +using helpers::ARM64EncodableConstantOrRegister; +using helpers::ArtVixlRegCodeCoherentForRegSet; +using helpers::CPURegisterFrom; +using helpers::DRegisterFrom; +using helpers::FPRegisterFrom; +using helpers::HeapOperand; +using helpers::HeapOperandFrom; +using helpers::InputCPURegisterOrZeroRegAt; +using helpers::InputFPRegisterAt; +using helpers::InputOperandAt; +using helpers::InputRegisterAt; +using helpers::Int64FromLocation; +using helpers::IsConstantZeroBitPattern; +using helpers::LocationFrom; +using helpers::OperandFromMemOperand; +using helpers::OutputCPURegister; +using helpers::OutputFPRegister; +using helpers::OutputRegister; +using helpers::QRegisterFrom; +using helpers::RegisterFrom; +using helpers::StackOperandFrom; +using helpers::VIXLRegCodeFromART; +using helpers::WRegisterFrom; +using helpers::XRegisterFrom; + +// The compare/jump sequence will generate about (1.5 * num_entries + 3) instructions. While jump +// table version generates 7 instructions and num_entries literals. Compare/jump sequence will +// generates less code/data with a small num_entries. +static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7; + +// Reference load (except object array loads) is using LDR Wt, [Xn, #offset] which can handle +// offset < 16KiB. For offsets >= 16KiB, the load shall be emitted as two or more instructions. +// For the Baker read barrier implementation using link-time generated thunks we need to split +// the offset explicitly. 
+constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB; + +inline Condition ARM64Condition(IfCondition cond) { + switch (cond) { + case kCondEQ: return eq; + case kCondNE: return ne; + case kCondLT: return lt; + case kCondLE: return le; + case kCondGT: return gt; + case kCondGE: return ge; + case kCondB: return lo; + case kCondBE: return ls; + case kCondA: return hi; + case kCondAE: return hs; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +inline Condition ARM64FPCondition(IfCondition cond, bool gt_bias) { + // The ARM64 condition codes can express all the necessary branches, see the + // "Meaning (floating-point)" column in the table C1-1 in the ARMv8 reference manual. + // There is no dex instruction or HIR that would need the missing conditions + // "equal or unordered" or "not equal". + switch (cond) { + case kCondEQ: return eq; + case kCondNE: return ne /* unordered */; + case kCondLT: return gt_bias ? cc : lt /* unordered */; + case kCondLE: return gt_bias ? ls : le /* unordered */; + case kCondGT: return gt_bias ? hi /* unordered */ : gt; + case kCondGE: return gt_bias ? cs /* unordered */ : ge; + default: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } +} + +Location ARM64ReturnLocation(DataType::Type return_type) { + // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the + // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`, + // but we use the exact registers for clarity. 
+ if (return_type == DataType::Type::kFloat32) { + return LocationFrom(s0); + } else if (return_type == DataType::Type::kFloat64) { + return LocationFrom(d0); + } else if (return_type == DataType::Type::kInt64) { + return LocationFrom(x0); + } else if (return_type == DataType::Type::kVoid) { + return Location::NoLocation(); + } else { + return LocationFrom(w0); + } +} + +Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type return_type) { + return ARM64ReturnLocation(return_type); +} + +static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() { + InvokeRuntimeCallingConvention calling_convention; + RegisterSet caller_saves = RegisterSet::Empty(); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); + DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), + RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference), + DataType::Type::kReference).GetCode()); + return caller_saves; +} + +// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. +#define __ down_cast(codegen)->GetVIXLAssembler()-> // NOLINT +#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, x).Int32Value() + +// Calculate memory accessing operand for save/restore live registers. +static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen, + LocationSummary* locations, + int64_t spill_offset, + bool is_save) { + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); + DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills, + codegen->GetNumberOfCoreRegisters(), + fp_spills, + codegen->GetNumberOfFloatingPointRegisters())); + + CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, core_spills); + unsigned v_reg_size = codegen->GetGraph()->HasSIMD() ? 
kQRegSize : kDRegSize; + CPURegList fp_list = CPURegList(CPURegister::kVRegister, v_reg_size, fp_spills); + + MacroAssembler* masm = down_cast(codegen)->GetVIXLAssembler(); + UseScratchRegisterScope temps(masm); + + Register base = masm->StackPointer(); + int64_t core_spill_size = core_list.GetTotalSizeInBytes(); + int64_t fp_spill_size = fp_list.GetTotalSizeInBytes(); + int64_t reg_size = kXRegSizeInBytes; + int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size; + uint32_t ls_access_size = WhichPowerOf2(reg_size); + if (((core_list.GetCount() > 1) || (fp_list.GetCount() > 1)) && + !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) { + // If the offset does not fit in the instruction's immediate field, use an alternate register + // to compute the base address(float point registers spill base address). + Register new_base = temps.AcquireSameSizeAs(base); + __ Add(new_base, base, Operand(spill_offset + core_spill_size)); + base = new_base; + spill_offset = -core_spill_size; + int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size; + DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size)); + DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size)); + } + + if (is_save) { + __ StoreCPURegList(core_list, MemOperand(base, spill_offset)); + __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size)); + } else { + __ LoadCPURegList(core_list, MemOperand(base, spill_offset)); + __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size)); + } +} + +void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); + for (uint32_t i : LowToHighBits(core_spills)) { + // If the register holds an object, update the stack mask. 
+ if (locations->RegisterContainsObject(i)) { + locations->SetStackBit(stack_offset / kVRegSize); + } + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_core_stack_offsets_[i] = stack_offset; + stack_offset += kXRegSizeInBytes; + } + + const size_t fp_reg_size = codegen->GetGraph()->HasSIMD() ? kQRegSizeInBytes : kDRegSizeInBytes; + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); + for (uint32_t i : LowToHighBits(fp_spills)) { + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_fpu_stack_offsets_[i] = stack_offset; + stack_offset += fp_reg_size; + } + + SaveRestoreLiveRegistersHelper(codegen, + locations, + codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ true); +} + +void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + SaveRestoreLiveRegistersHelper(codegen, + locations, + codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ false); +} + +class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 { + public: + explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + + __ Bind(GetEntryLabel()); + if (instruction_->CanThrowIntoCatchBlock()) { + // Live registers will be restored in the catch block if caught. + SaveLiveRegisters(codegen, instruction_->GetLocations()); + } + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. 
+ InvokeRuntimeCallingConvention calling_convention; + codegen->EmitParallelMoves(locations->InAt(0), + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kInt32, + locations->InAt(1), + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kInt32); + QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() + ? kQuickThrowStringBounds + : kQuickThrowArrayBounds; + arm64_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "BoundsCheckSlowPathARM64"; } + + private: + DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64); +}; + +class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 { + public: + explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "DivZeroCheckSlowPathARM64"; } + + private: + DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64); +}; + +class LoadClassSlowPathARM64 : public SlowPathCodeARM64 { + public: + LoadClassSlowPathARM64(HLoadClass* cls, HInstruction* at) + : SlowPathCodeARM64(at), cls_(cls) { + DCHECK(at->IsLoadClass() || at->IsClinitCheck()); + DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); + } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + Location out = locations->Out(); + const uint32_t dex_pc = instruction_->GetDexPc(); + bool must_resolve_type = instruction_->IsLoadClass() && 
cls_->MustResolveTypeOnSlowPath(); + bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck(); + + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + if (must_resolve_type) { + DCHECK(IsSameDexFile(cls_->GetDexFile(), arm64_codegen->GetGraph()->GetDexFile())); + dex::TypeIndex type_index = cls_->GetTypeIndex(); + __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_); + arm64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this); + CheckEntrypointTypes(); + // If we also must_do_clinit, the resolved type is now in the correct register. + } else { + DCHECK(must_do_clinit); + Location source = instruction_->IsLoadClass() ? out : locations->InAt(0); + arm64_codegen->MoveLocation(LocationFrom(calling_convention.GetRegisterAt(0)), + source, + cls_->GetType()); + } + if (must_do_clinit) { + arm64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this); + CheckEntrypointTypes(); + } + + // Move the class to the desired location. + if (out.IsValid()) { + DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); + DataType::Type type = instruction_->GetType(); + arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type); + } + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "LoadClassSlowPathARM64"; } + + private: + // The class this slow path will load. 
+ HLoadClass* const cls_; + + DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64); +}; + +class LoadStringSlowPathARM64 : public SlowPathCodeARM64 { + public: + explicit LoadStringSlowPathARM64(HLoadString* instruction) + : SlowPathCodeARM64(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex(); + __ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_); + arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + DataType::Type type = instruction_->GetType(); + arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type); + + RestoreLiveRegisters(codegen, locations); + + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "LoadStringSlowPathARM64"; } + + private: + DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64); +}; + +class NullCheckSlowPathARM64 : public SlowPathCodeARM64 { + public: + explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + if (instruction_->CanThrowIntoCatchBlock()) { + // Live registers will be restored in the catch block if caught. 
+ SaveLiveRegisters(codegen, instruction_->GetLocations()); + } + arm64_codegen->InvokeRuntime(kQuickThrowNullPointer, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "NullCheckSlowPathARM64"; } + + private: + DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64); +}; + +class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 { + public: + SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor) + : SlowPathCodeARM64(instruction), successor_(successor) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); // Only saves live 128-bit regs for SIMD. + arm64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + RestoreLiveRegisters(codegen, locations); // Only restores live 128-bit regs for SIMD. + if (successor_ == nullptr) { + __ B(GetReturnLabel()); + } else { + __ B(arm64_codegen->GetLabelOf(successor_)); + } + } + + vixl::aarch64::Label* GetReturnLabel() { + DCHECK(successor_ == nullptr); + return &return_label_; + } + + HBasicBlock* GetSuccessor() const { + return successor_; + } + + const char* GetDescription() const override { return "SuspendCheckSlowPathARM64"; } + + private: + // If not null, the block to branch to after the suspend check. + HBasicBlock* const successor_; + + // If `successor_` is null, the label to branch to after the suspend check. 
+ vixl::aarch64::Label return_label_; + + DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64); +}; + +class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { + public: + TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal) + : SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + + DCHECK(instruction_->IsCheckCast() + || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + uint32_t dex_pc = instruction_->GetDexPc(); + + __ Bind(GetEntryLabel()); + + if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) { + SaveLiveRegisters(codegen, locations); + } + + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. + InvokeRuntimeCallingConvention calling_convention; + codegen->EmitParallelMoves(locations->InAt(0), + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + locations->InAt(1), + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kReference); + if (instruction_->IsInstanceOf()) { + arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); + CheckEntrypointTypes(); + DataType::Type ret_type = instruction_->GetType(); + Location ret_loc = calling_convention.GetReturnLocation(ret_type); + arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type); + } else { + DCHECK(instruction_->IsCheckCast()); + arm64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this); + CheckEntrypointTypes(); + } + + if (!is_fatal_) { + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + } + + const char* GetDescription() const override { return "TypeCheckSlowPathARM64"; } + bool IsFatal() const override { return is_fatal_; } + + private: + const bool is_fatal_; + + 
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64); +}; + +class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 { + public: + explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction) + : SlowPathCodeARM64(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + LocationSummary* locations = instruction_->GetLocations(); + SaveLiveRegisters(codegen, locations); + InvokeRuntimeCallingConvention calling_convention; + __ Mov(calling_convention.GetRegisterAt(0), + static_cast(instruction_->AsDeoptimize()->GetDeoptimizationKind())); + arm64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + } + + const char* GetDescription() const override { return "DeoptimizationSlowPathARM64"; } + + private: + DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64); +}; + +class ArraySetSlowPathARM64 : public SlowPathCodeARM64 { + public: + explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); + parallel_move.AddMove( + locations->InAt(0), + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + nullptr); + parallel_move.AddMove( + locations->InAt(1), + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kInt32, + nullptr); + parallel_move.AddMove( + locations->InAt(2), + LocationFrom(calling_convention.GetRegisterAt(2)), + DataType::Type::kReference, + nullptr); + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + 
arm64_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "ArraySetSlowPathARM64"; } + + private: + DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64); +}; + +void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) { + uint32_t num_entries = switch_instr_->GetNumEntries(); + DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold); + + // We are about to use the assembler to place literals directly. Make sure we have enough + // underlying code buffer and we have generated the jump table with right size. + EmissionCheckScope scope(codegen->GetVIXLAssembler(), + num_entries * sizeof(int32_t), + CodeBufferCheckScope::kExactSize); + + __ Bind(&table_start_); + const ArenaVector& successors = switch_instr_->GetBlock()->GetSuccessors(); + for (uint32_t i = 0; i < num_entries; i++) { + vixl::aarch64::Label* target_label = codegen->GetLabelOf(successors[i]); + DCHECK(target_label->IsBound()); + ptrdiff_t jump_offset = target_label->GetLocation() - table_start_.GetLocation(); + DCHECK_GT(jump_offset, std::numeric_limits::min()); + DCHECK_LE(jump_offset, std::numeric_limits::max()); + Literal literal(jump_offset); + __ place(&literal); + } +} + +// Slow path generating a read barrier for a heap reference. 
+class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { + public: + ReadBarrierForHeapReferenceSlowPathARM64(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) + : SlowPathCodeARM64(instruction), + out_(out), + ref_(ref), + obj_(obj), + offset_(offset), + index_(index) { + DCHECK(kEmitCompilerReadBarrier); + // If `obj` is equal to `out` or `ref`, it means the initial object + // has been overwritten by (or after) the heap object reference load + // to be instrumented, e.g.: + // + // __ Ldr(out, HeapOperand(out, class_offset); + // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset); + // + // In that case, we have lost the information about the original + // object, and the emitted read barrier cannot work properly. + DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out; + DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; + } + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + LocationSummary* locations = instruction_->GetLocations(); + DataType::Type type = DataType::Type::kReference; + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg())); + DCHECK(instruction_->IsInstanceFieldGet() || + instruction_->IsStaticFieldGet() || + instruction_->IsArrayGet() || + instruction_->IsInstanceOf() || + instruction_->IsCheckCast() || + (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified())) + << "Unexpected instruction in read barrier for heap reference slow path: " + << instruction_->DebugName(); + // The read barrier instrumentation of object ArrayGet + // instructions does not support the HIntermediateAddress + // instruction. 
+ DCHECK(!(instruction_->IsArrayGet() && + instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress())); + + __ Bind(GetEntryLabel()); + + SaveLiveRegisters(codegen, locations); + + // We may have to change the index's value, but as `index_` is a + // constant member (like other "inputs" of this slow path), + // introduce a copy of it, `index`. + Location index = index_; + if (index_.IsValid()) { + // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics. + if (instruction_->IsArrayGet()) { + // Compute the actual memory offset and store it in `index`. + Register index_reg = RegisterFrom(index_, DataType::Type::kInt32); + DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_.reg())); + if (codegen->IsCoreCalleeSaveRegister(index_.reg())) { + // We are about to change the value of `index_reg` (see the + // calls to vixl::MacroAssembler::Lsl and + // vixl::MacroAssembler::Mov below), but it has + // not been saved by the previous call to + // art::SlowPathCode::SaveLiveRegisters, as it is a + // callee-save register -- + // art::SlowPathCode::SaveLiveRegisters does not consider + // callee-save registers, as it has been designed with the + // assumption that callee-save registers are supposed to be + // handled by the called function. So, as a callee-save + // register, `index_reg` _would_ eventually be saved onto + // the stack, but it would be too late: we would have + // changed its value earlier. Therefore, we manually save + // it here into another freely available register, + // `free_reg`, chosen of course among the caller-save + // registers (as a callee-save `free_reg` register would + // exhibit the same problem). + // + // Note we could have requested a temporary register from + // the register allocator instead; but we prefer not to, as + // this is a slow path, and we know we can find a + // caller-save register that is available. 
+ Register free_reg = FindAvailableCallerSaveRegister(codegen); + __ Mov(free_reg.W(), index_reg); + index_reg = free_reg; + index = LocationFrom(index_reg); + } else { + // The initial register stored in `index_` has already been + // saved in the call to art::SlowPathCode::SaveLiveRegisters + // (as it is not a callee-save register), so we can freely + // use it. + } + // Shifting the index value contained in `index_reg` by the scale + // factor (2) cannot overflow in practice, as the runtime is + // unable to allocate object arrays with a size larger than + // 2^26 - 1 (that is, 2^28 - 4 bytes). + __ Lsl(index_reg, index_reg, DataType::SizeShift(type)); + static_assert( + sizeof(mirror::HeapReference) == sizeof(int32_t), + "art::mirror::HeapReference and int32_t have different sizes."); + __ Add(index_reg, index_reg, Operand(offset_)); + } else { + // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile + // intrinsics, `index_` is not shifted by a scale factor of 2 + // (as in the case of ArrayGet), as it is actually an offset + // to an object field within an object. + DCHECK(instruction_->IsInvoke()) << instruction_->DebugName(); + DCHECK(instruction_->GetLocations()->Intrinsified()); + DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) || + (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)) + << instruction_->AsInvoke()->GetIntrinsic(); + DCHECK_EQ(offset_, 0u); + DCHECK(index_.IsRegister()); + } + } + + // We're moving two or three locations to locations that could + // overlap, so we need a parallel move resolver. 
+ InvokeRuntimeCallingConvention calling_convention; + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); + parallel_move.AddMove(ref_, + LocationFrom(calling_convention.GetRegisterAt(0)), + type, + nullptr); + parallel_move.AddMove(obj_, + LocationFrom(calling_convention.GetRegisterAt(1)), + type, + nullptr); + if (index.IsValid()) { + parallel_move.AddMove(index, + LocationFrom(calling_convention.GetRegisterAt(2)), + DataType::Type::kInt32, + nullptr); + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + } else { + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + arm64_codegen->MoveConstant(LocationFrom(calling_convention.GetRegisterAt(2)), offset_); + } + arm64_codegen->InvokeRuntime(kQuickReadBarrierSlow, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes< + kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>(); + arm64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type); + + RestoreLiveRegisters(codegen, locations); + + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathARM64"; } + + private: + Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) { + size_t ref = static_cast(XRegisterFrom(ref_).GetCode()); + size_t obj = static_cast(XRegisterFrom(obj_).GetCode()); + for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) { + if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) { + return Register(VIXLRegCodeFromART(i), kXRegSize); + } + } + // We shall never fail to find a free caller-save register, as + // there are more than two core caller-save registers on ARM64 + // (meaning it is possible to find one which is different from + // `ref` and `obj`). 
+ DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u); + LOG(FATAL) << "Could not find a free register"; + UNREACHABLE(); + } + + const Location out_; + const Location ref_; + const Location obj_; + const uint32_t offset_; + // An additional location containing an index to an array. + // Only used for HArrayGet and the UnsafeGetObject & + // UnsafeGetObjectVolatile intrinsics. + const Location index_; + + DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathARM64); +}; + +// Slow path generating a read barrier for a GC root. +class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 { + public: + ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root) + : SlowPathCodeARM64(instruction), out_(out), root_(root) { + DCHECK(kEmitCompilerReadBarrier); + } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + DataType::Type type = DataType::Type::kReference; + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg())); + DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()) + << "Unexpected instruction in read barrier for GC root slow path: " + << instruction_->DebugName(); + + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + CodeGeneratorARM64* arm64_codegen = down_cast(codegen); + // The argument of the ReadBarrierForRootSlow is not a managed + // reference (`mirror::Object*`), but a `GcRoot*`; + // thus we need a 64-bit move here, and we cannot use + // + // arm64_codegen->MoveLocation( + // LocationFrom(calling_convention.GetRegisterAt(0)), + // root_, + // type); + // + // which would emit a 32-bit move, as `type` is a (32-bit wide) + // reference type (`DataType::Type::kReference`). 
+ __ Mov(calling_convention.GetRegisterAt(0), XRegisterFrom(out_)); + arm64_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes*>(); + arm64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type); + + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARM64"; } + + private: + const Location out_; + const Location root_; + + DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathARM64); +}; + +#undef __ + +Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(DataType::Type type) { + Location next_location; + if (type == DataType::Type::kVoid) { + LOG(FATAL) << "Unreachable type " << type; + } + + if (DataType::IsFloatingPointType(type) && + (float_index_ < calling_convention.GetNumberOfFpuRegisters())) { + next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++)); + } else if (!DataType::IsFloatingPointType(type) && + (gp_index_ < calling_convention.GetNumberOfRegisters())) { + next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++)); + } else { + size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_); + next_location = DataType::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset) + : Location::StackSlot(stack_offset); + } + + // Space on the stack is reserved for all arguments. + stack_index_ += DataType::Is64BitType(type) ? 
2 : 1; + return next_location; +} + +Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const { + return LocationFrom(kArtMethodRegister); +} + +CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats) + : CodeGenerator(graph, + kNumberOfAllocatableRegisters, + kNumberOfAllocatableFPRegisters, + kNumberOfAllocatableRegisterPairs, + callee_saved_core_registers.GetList(), + callee_saved_fp_registers.GetList(), + compiler_options, + stats), + block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + location_builder_(graph, this), + instruction_visitor_(graph, this), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator(), + compiler_options.GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()), + boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + call_entrypoint_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + uint32_literals_(std::less(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + uint64_literals_(std::less(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(StringReferenceValueComparator(), + 
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_class_patches_(TypeReferenceValueComparator(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_baker_read_barrier_slow_paths_(std::less(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { + // Save the link register (containing the return address) to mimic Quick. + AddAllocatedRegister(LocationFrom(lr)); +} + +#define __ GetVIXLAssembler()-> + +void CodeGeneratorARM64::EmitJumpTables() { + for (auto&& jump_table : jump_tables_) { + jump_table->EmitTable(this); + } +} + +void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) { + EmitJumpTables(); + + // Emit JIT baker read barrier slow paths. + DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty()); + for (auto& entry : jit_baker_read_barrier_slow_paths_) { + uint32_t encoded_data = entry.first; + vixl::aarch64::Label* slow_path_entry = &entry.second.label; + __ Bind(slow_path_entry); + CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr); + } + + // Ensure we emit the literal pool. + __ FinalizeCode(); + + CodeGenerator::Finalize(allocator); + + // Verify Baker read barrier linker patches. + if (kIsDebugBuild) { + ArrayRef code = allocator->GetMemory(); + for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) { + DCHECK(info.label.IsBound()); + uint32_t literal_offset = info.label.GetLocation(); + DCHECK_ALIGNED(literal_offset, 4u); + + auto GetInsn = [&code](uint32_t offset) { + DCHECK_ALIGNED(offset, 4u); + return + (static_cast(code[offset + 0]) << 0) + + (static_cast(code[offset + 1]) << 8) + + (static_cast(code[offset + 2]) << 16)+ + (static_cast(code[offset + 3]) << 24); + }; + + const uint32_t encoded_data = info.custom_data; + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + // Check that the next instruction matches the expected LDR. 
+ switch (kind) { + case BakerReadBarrierKind::kField: + case BakerReadBarrierKind::kAcquire: { + DCHECK_GE(code.size() - literal_offset, 8u); + uint32_t next_insn = GetInsn(literal_offset + 4u); + CheckValidReg(next_insn & 0x1fu); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + if (kind == BakerReadBarrierKind::kField) { + // LDR (immediate) with correct base_reg. + CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5)); + } else { + DCHECK(kind == BakerReadBarrierKind::kAcquire); + // LDAR with correct base_reg. + CHECK_EQ(next_insn & 0xffffffe0u, 0x88dffc00u | (base_reg << 5)); + } + break; + } + case BakerReadBarrierKind::kArray: { + DCHECK_GE(code.size() - literal_offset, 8u); + uint32_t next_insn = GetInsn(literal_offset + 4u); + // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL), + // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2]. + CheckValidReg(next_insn & 0x1fu); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5)); + CheckValidReg((next_insn >> 16) & 0x1f); // Check index register + break; + } + case BakerReadBarrierKind::kGcRoot: { + DCHECK_GE(literal_offset, 4u); + uint32_t prev_insn = GetInsn(literal_offset - 4u); + const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + // Usually LDR (immediate) with correct root_reg but + // we may have a "MOV marked, old_value" for UnsafeCASObject. + if ((prev_insn & 0xffe0ffff) != (0x2a0003e0 | root_reg)) { // MOV? + CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg); // LDR? + } + break; + } + default: + LOG(FATAL) << "Unexpected kind: " << static_cast(kind); + UNREACHABLE(); + } + } + } +} + +void ParallelMoveResolverARM64::PrepareForEmitNativeCode() { + // Note: There are 6 kinds of moves: + // 1. 
constant -> GPR/FPR (non-cycle) + // 2. constant -> stack (non-cycle) + // 3. GPR/FPR -> GPR/FPR + // 4. GPR/FPR -> stack + // 5. stack -> GPR/FPR + // 6. stack -> stack (non-cycle) + // Case 1, 2 and 6 should never be included in a dependency cycle on ARM64. For case 3, 4, and 5 + // VIXL uses at most 1 GPR. VIXL has 2 GPR and 1 FPR temps, and there should be no intersecting + // cycles on ARM64, so we always have 1 GPR and 1 FPR available VIXL temps to resolve the + // dependency. + vixl_temps_.Open(GetVIXLAssembler()); +} + +void ParallelMoveResolverARM64::FinishEmitNativeCode() { + vixl_temps_.Close(); +} + +Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) { + DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister + || kind == Location::kStackSlot || kind == Location::kDoubleStackSlot + || kind == Location::kSIMDStackSlot); + kind = (kind == Location::kFpuRegister || kind == Location::kSIMDStackSlot) + ? Location::kFpuRegister + : Location::kRegister; + Location scratch = GetScratchLocation(kind); + if (!scratch.Equals(Location::NoLocation())) { + return scratch; + } + // Allocate from VIXL temp registers. + if (kind == Location::kRegister) { + scratch = LocationFrom(vixl_temps_.AcquireX()); + } else { + DCHECK_EQ(kind, Location::kFpuRegister); + scratch = LocationFrom(codegen_->GetGraph()->HasSIMD() + ? vixl_temps_.AcquireVRegisterOfSize(kQRegSize) + : vixl_temps_.AcquireD()); + } + AddScratchLocation(scratch); + return scratch; +} + +void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) { + if (loc.IsRegister()) { + vixl_temps_.Release(XRegisterFrom(loc)); + } else { + DCHECK(loc.IsFpuRegister()); + vixl_temps_.Release(codegen_->GetGraph()->HasSIMD() ? 
QRegisterFrom(loc) : DRegisterFrom(loc)); + } + RemoveScratchLocation(loc); +} + +void ParallelMoveResolverARM64::EmitMove(size_t index) { + MoveOperands* move = moves_[index]; + codegen_->MoveLocation(move->GetDestination(), move->GetSource(), DataType::Type::kVoid); +} + +void CodeGeneratorARM64::MaybeIncrementHotness(bool is_frame_entry) { + MacroAssembler* masm = GetVIXLAssembler(); + if (GetCompilerOptions().CountHotnessInCompiledCode()) { + UseScratchRegisterScope temps(masm); + Register counter = temps.AcquireX(); + Register method = is_frame_entry ? kArtMethodRegister : temps.AcquireX(); + if (!is_frame_entry) { + __ Ldr(method, MemOperand(sp, 0)); + } + __ Ldrh(counter, MemOperand(method, ArtMethod::HotnessCountOffset().Int32Value())); + __ Add(counter, counter, 1); + // Subtract one if the counter would overflow. + __ Sub(counter, counter, Operand(counter, LSR, 16)); + __ Strh(counter, MemOperand(method, ArtMethod::HotnessCountOffset().Int32Value())); + } + + if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) { + ScopedObjectAccess soa(Thread::Current()); + ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize); + if (info != nullptr) { + uint64_t address = reinterpret_cast64(info); + vixl::aarch64::Label done; + UseScratchRegisterScope temps(masm); + Register temp = temps.AcquireX(); + Register counter = temps.AcquireW(); + __ Mov(temp, address); + __ Ldrh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value())); + __ Add(counter, counter, 1); + __ Strh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value())); + __ Tst(counter, 0xffff); + __ B(ne, &done); + if (is_frame_entry) { + if (HasEmptyFrame()) { + // The entyrpoint expects the method at the bottom of the stack. We + // claim stack space necessary for alignment. 
+ __ Claim(kStackAlignment); + __ Stp(kArtMethodRegister, lr, MemOperand(sp, 0)); + } else if (!RequiresCurrentMethod()) { + __ Str(kArtMethodRegister, MemOperand(sp, 0)); + } + } else { + CHECK(RequiresCurrentMethod()); + } + uint32_t entrypoint_offset = + GetThreadOffset(kQuickCompileOptimized).Int32Value(); + __ Ldr(lr, MemOperand(tr, entrypoint_offset)); + // Note: we don't record the call here (and therefore don't generate a stack + // map), as the entrypoint should never be suspended. + __ Blr(lr); + if (HasEmptyFrame()) { + CHECK(is_frame_entry); + __ Ldr(lr, MemOperand(sp, 8)); + __ Drop(kStackAlignment); + } + __ Bind(&done); + } + } +} + +void CodeGeneratorARM64::GenerateFrameEntry() { + MacroAssembler* masm = GetVIXLAssembler(); + __ Bind(&frame_entry_label_); + + bool do_overflow_check = + FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod(); + if (do_overflow_check) { + UseScratchRegisterScope temps(masm); + Register temp = temps.AcquireX(); + DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); + __ Sub(temp, sp, static_cast(GetStackOverflowReservedBytes(InstructionSet::kArm64))); + { + // Ensure that between load and RecordPcInfo there are no pools emitted. + ExactAssemblyScope eas(GetVIXLAssembler(), + kInstructionSize, + CodeBufferCheckScope::kExactSize); + __ ldr(wzr, MemOperand(temp, 0)); + RecordPcInfo(nullptr, 0); + } + } + + if (!HasEmptyFrame()) { + // Stack layout: + // sp[frame_size - 8] : lr. + // ... : other preserved core registers. + // ... : other preserved fp registers. + // ... : reserved frame space. + // sp[0] : current method. 
+ int32_t frame_size = dchecked_integral_cast(GetFrameSize()); + uint32_t core_spills_offset = frame_size - GetCoreSpillSize(); + CPURegList preserved_core_registers = GetFramePreservedCoreRegisters(); + DCHECK(!preserved_core_registers.IsEmpty()); + uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize(); + CPURegList preserved_fp_registers = GetFramePreservedFPRegisters(); + + // Save the current method if we need it, or if using STP reduces code + // size. Note that we do not do this in HCurrentMethod, as the + // instruction might have been removed in the SSA graph. + CPURegister lowest_spill; + if (core_spills_offset == kXRegSizeInBytes) { + // If there is no gap between the method and the lowest core spill, use + // aligned STP pre-index to store both. Max difference is 512. We do + // that to reduce code size even if we do not have to save the method. + DCHECK_LE(frame_size, 512); // 32 core registers are only 256 bytes. + lowest_spill = preserved_core_registers.PopLowestIndex(); + __ Stp(kArtMethodRegister, lowest_spill, MemOperand(sp, -frame_size, PreIndex)); + } else if (RequiresCurrentMethod()) { + __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex)); + } else { + __ Claim(frame_size); + } + GetAssembler()->cfi().AdjustCFAOffset(frame_size); + if (lowest_spill.IsValid()) { + GetAssembler()->cfi().RelOffset(DWARFReg(lowest_spill), core_spills_offset); + core_spills_offset += kXRegSizeInBytes; + } + GetAssembler()->SpillRegisters(preserved_core_registers, core_spills_offset); + GetAssembler()->SpillRegisters(preserved_fp_registers, fp_spills_offset); + + if (GetGraph()->HasShouldDeoptimizeFlag()) { + // Initialize should_deoptimize flag to 0. 
+ Register wzr = Register(VIXLRegCodeFromART(WZR), kWRegSize); + __ Str(wzr, MemOperand(sp, GetStackOffsetOfShouldDeoptimizeFlag())); + } + } + MaybeIncrementHotness(/* is_frame_entry= */ true); + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void CodeGeneratorARM64::GenerateFrameExit() { + GetAssembler()->cfi().RememberState(); + if (!HasEmptyFrame()) { + int32_t frame_size = dchecked_integral_cast(GetFrameSize()); + uint32_t core_spills_offset = frame_size - GetCoreSpillSize(); + CPURegList preserved_core_registers = GetFramePreservedCoreRegisters(); + DCHECK(!preserved_core_registers.IsEmpty()); + uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize(); + CPURegList preserved_fp_registers = GetFramePreservedFPRegisters(); + + CPURegister lowest_spill; + if (core_spills_offset == kXRegSizeInBytes) { + // If there is no gap between the method and the lowest core spill, use + // aligned LDP pre-index to pop both. Max difference is 504. We do + // that to reduce code size even though the loaded method is unused. + DCHECK_LE(frame_size, 504); // 32 core registers are only 256 bytes. 
+ lowest_spill = preserved_core_registers.PopLowestIndex(); + core_spills_offset += kXRegSizeInBytes; + } + GetAssembler()->UnspillRegisters(preserved_fp_registers, fp_spills_offset); + GetAssembler()->UnspillRegisters(preserved_core_registers, core_spills_offset); + if (lowest_spill.IsValid()) { + __ Ldp(xzr, lowest_spill, MemOperand(sp, frame_size, PostIndex)); + GetAssembler()->cfi().Restore(DWARFReg(lowest_spill)); + } else { + __ Drop(frame_size); + } + GetAssembler()->cfi().AdjustCFAOffset(-frame_size); + } + __ Ret(); + GetAssembler()->cfi().RestoreState(); + GetAssembler()->cfi().DefCFAOffset(GetFrameSize()); +} + +CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const { + DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0)); + return CPURegList(CPURegister::kRegister, kXRegSize, + core_spill_mask_); +} + +CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const { + DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_, + GetNumberOfFloatingPointRegisters())); + return CPURegList(CPURegister::kVRegister, kDRegSize, + fpu_spill_mask_); +} + +void CodeGeneratorARM64::Bind(HBasicBlock* block) { + __ Bind(GetLabelOf(block)); +} + +void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) { + DCHECK(location.IsRegister()); + __ Mov(RegisterFrom(location, DataType::Type::kInt32), value); +} + +void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) { + if (location.IsRegister()) { + locations->AddTemp(location); + } else { + UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location; + } +} + +void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_can_be_null) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register card = temps.AcquireX(); + Register temp = temps.AcquireW(); // Index within the CardTable - 32bit. 
+ vixl::aarch64::Label done; + if (value_can_be_null) { + __ Cbz(value, &done); + } + // Load the address of the card table into `card`. + __ Ldr(card, MemOperand(tr, Thread::CardTableOffset().Int32Value())); + // Calculate the offset (in the card table) of the card corresponding to + // `object`. + __ Lsr(temp, object, gc::accounting::CardTable::kCardShift); + // Write the `art::gc::accounting::CardTable::kCardDirty` value into the + // `object`'s card. + // + // Register `card` contains the address of the card table. Note that the card + // table's base is biased during its creation so that it always starts at an + // address whose least-significant byte is equal to `kCardDirty` (see + // art::gc::accounting::CardTable::Create). Therefore the STRB instruction + // below writes the `kCardDirty` (byte) value into the `object`'s card + // (located at `card + object >> kCardShift`). + // + // This dual use of the value in register `card` (1. to calculate the location + // of the card to mark; and 2. to load the `kCardDirty` value) saves a load + // (no need to explicitly load `kCardDirty` as an immediate value). + __ Strb(card, MemOperand(card, temp.X())); + if (value_can_be_null) { + __ Bind(&done); + } +} + +void CodeGeneratorARM64::SetupBlockedRegisters() const { + // Blocked core registers: + // lr : Runtime reserved. + // tr : Runtime reserved. + // mr : Runtime reserved. + // ip1 : VIXL core temp. + // ip0 : VIXL core temp. + // x18 : Platform register. + // + // Blocked fp registers: + // d31 : VIXL fp temp. 
+ CPURegList reserved_core_registers = vixl_reserved_core_registers; + reserved_core_registers.Combine(runtime_reserved_core_registers); + while (!reserved_core_registers.IsEmpty()) { + blocked_core_registers_[reserved_core_registers.PopLowestIndex().GetCode()] = true; + } + blocked_core_registers_[X18] = true; + + CPURegList reserved_fp_registers = vixl_reserved_fp_registers; + while (!reserved_fp_registers.IsEmpty()) { + blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().GetCode()] = true; + } + + if (GetGraph()->IsDebuggable()) { + // Stubs do not save callee-save floating point registers. If the graph + // is debuggable, we need to deal with these registers differently. For + // now, just block them. + CPURegList reserved_fp_registers_debuggable = callee_saved_fp_registers; + while (!reserved_fp_registers_debuggable.IsEmpty()) { + blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().GetCode()] = true; + } + } +} + +size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) { + Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize); + __ Str(reg, MemOperand(sp, stack_index)); + return kArm64WordSize; +} + +size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) { + Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize); + __ Ldr(reg, MemOperand(sp, stack_index)); + return kArm64WordSize; +} + +size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED, + uint32_t reg_id ATTRIBUTE_UNUSED) { + LOG(FATAL) << "FP registers shouldn't be saved/restored individually, " + << "use SaveRestoreLiveRegistersHelper"; + UNREACHABLE(); +} + +size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED, + uint32_t reg_id ATTRIBUTE_UNUSED) { + LOG(FATAL) << "FP registers shouldn't be saved/restored individually, " + << "use SaveRestoreLiveRegistersHelper"; + UNREACHABLE(); +} + +void 
CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const { + stream << XRegister(reg); +} + +void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const { + stream << DRegister(reg); +} + +const Arm64InstructionSetFeatures& CodeGeneratorARM64::GetInstructionSetFeatures() const { + return *GetCompilerOptions().GetInstructionSetFeatures()->AsArm64InstructionSetFeatures(); +} + +void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) { + if (constant->IsIntConstant()) { + __ Mov(Register(destination), constant->AsIntConstant()->GetValue()); + } else if (constant->IsLongConstant()) { + __ Mov(Register(destination), constant->AsLongConstant()->GetValue()); + } else if (constant->IsNullConstant()) { + __ Mov(Register(destination), 0); + } else if (constant->IsFloatConstant()) { + __ Fmov(VRegister(destination), constant->AsFloatConstant()->GetValue()); + } else { + DCHECK(constant->IsDoubleConstant()); + __ Fmov(VRegister(destination), constant->AsDoubleConstant()->GetValue()); + } +} + + +static bool CoherentConstantAndType(Location constant, DataType::Type type) { + DCHECK(constant.IsConstant()); + HConstant* cst = constant.GetConstant(); + return (cst->IsIntConstant() && type == DataType::Type::kInt32) || + // Null is mapped to a core W register, which we associate with kPrimInt. + (cst->IsNullConstant() && type == DataType::Type::kInt32) || + (cst->IsLongConstant() && type == DataType::Type::kInt64) || + (cst->IsFloatConstant() && type == DataType::Type::kFloat32) || + (cst->IsDoubleConstant() && type == DataType::Type::kFloat64); +} + +// Allocate a scratch register from the VIXL pool, querying first +// the floating-point register pool, and then the core register +// pool. This is essentially a reimplementation of +// vixl::aarch64::UseScratchRegisterScope::AcquireCPURegisterOfSize +// using a different allocation strategy. 
+static CPURegister AcquireFPOrCoreCPURegisterOfSize(vixl::aarch64::MacroAssembler* masm, + vixl::aarch64::UseScratchRegisterScope* temps, + int size_in_bits) { + return masm->GetScratchVRegisterList()->IsEmpty() + ? CPURegister(temps->AcquireRegisterOfSize(size_in_bits)) + : CPURegister(temps->AcquireVRegisterOfSize(size_in_bits)); +} + +void CodeGeneratorARM64::MoveLocation(Location destination, + Location source, + DataType::Type dst_type) { + if (source.Equals(destination)) { + return; + } + + // A valid move can always be inferred from the destination and source + // locations. When moving from and to a register, the argument type can be + // used to generate 32bit instead of 64bit moves. In debug mode we also + // checks the coherency of the locations and the type. + bool unspecified_type = (dst_type == DataType::Type::kVoid); + + if (destination.IsRegister() || destination.IsFpuRegister()) { + if (unspecified_type) { + HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr; + if (source.IsStackSlot() || + (src_cst != nullptr && (src_cst->IsIntConstant() + || src_cst->IsFloatConstant() + || src_cst->IsNullConstant()))) { + // For stack slots and 32bit constants, a 64bit type is appropriate. + dst_type = destination.IsRegister() ? DataType::Type::kInt32 : DataType::Type::kFloat32; + } else { + // If the source is a double stack slot or a 64bit constant, a 64bit + // type is appropriate. Else the source is a register, and since the + // type has not been specified, we chose a 64bit type to force a 64bit + // move. + dst_type = destination.IsRegister() ? 
DataType::Type::kInt64 : DataType::Type::kFloat64; + } + } + DCHECK((destination.IsFpuRegister() && DataType::IsFloatingPointType(dst_type)) || + (destination.IsRegister() && !DataType::IsFloatingPointType(dst_type))); + CPURegister dst = CPURegisterFrom(destination, dst_type); + if (source.IsStackSlot() || source.IsDoubleStackSlot()) { + DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot()); + __ Ldr(dst, StackOperandFrom(source)); + } else if (source.IsSIMDStackSlot()) { + __ Ldr(QRegisterFrom(destination), StackOperandFrom(source)); + } else if (source.IsConstant()) { + DCHECK(CoherentConstantAndType(source, dst_type)); + MoveConstant(dst, source.GetConstant()); + } else if (source.IsRegister()) { + if (destination.IsRegister()) { + __ Mov(Register(dst), RegisterFrom(source, dst_type)); + } else { + DCHECK(destination.IsFpuRegister()); + DataType::Type source_type = DataType::Is64BitType(dst_type) + ? DataType::Type::kInt64 + : DataType::Type::kInt32; + __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type)); + } + } else { + DCHECK(source.IsFpuRegister()); + if (destination.IsRegister()) { + DataType::Type source_type = DataType::Is64BitType(dst_type) + ? 
DataType::Type::kFloat64 + : DataType::Type::kFloat32; + __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type)); + } else { + DCHECK(destination.IsFpuRegister()); + if (GetGraph()->HasSIMD()) { + __ Mov(QRegisterFrom(destination), QRegisterFrom(source)); + } else { + __ Fmov(VRegister(dst), FPRegisterFrom(source, dst_type)); + } + } + } + } else if (destination.IsSIMDStackSlot()) { + if (source.IsFpuRegister()) { + __ Str(QRegisterFrom(source), StackOperandFrom(destination)); + } else { + DCHECK(source.IsSIMDStackSlot()); + UseScratchRegisterScope temps(GetVIXLAssembler()); + if (GetVIXLAssembler()->GetScratchVRegisterList()->IsEmpty()) { + Register temp = temps.AcquireX(); + __ Ldr(temp, MemOperand(sp, source.GetStackIndex())); + __ Str(temp, MemOperand(sp, destination.GetStackIndex())); + __ Ldr(temp, MemOperand(sp, source.GetStackIndex() + kArm64WordSize)); + __ Str(temp, MemOperand(sp, destination.GetStackIndex() + kArm64WordSize)); + } else { + VRegister temp = temps.AcquireVRegisterOfSize(kQRegSize); + __ Ldr(temp, StackOperandFrom(source)); + __ Str(temp, StackOperandFrom(destination)); + } + } + } else { // The destination is not a register. It must be a stack slot. + DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot()); + if (source.IsRegister() || source.IsFpuRegister()) { + if (unspecified_type) { + if (source.IsRegister()) { + dst_type = destination.IsStackSlot() ? DataType::Type::kInt32 : DataType::Type::kInt64; + } else { + dst_type = + destination.IsStackSlot() ? 
DataType::Type::kFloat32 : DataType::Type::kFloat64; + } + } + DCHECK((destination.IsDoubleStackSlot() == DataType::Is64BitType(dst_type)) && + (source.IsFpuRegister() == DataType::IsFloatingPointType(dst_type))); + __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination)); + } else if (source.IsConstant()) { + DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type)) + << source << " " << dst_type; + UseScratchRegisterScope temps(GetVIXLAssembler()); + HConstant* src_cst = source.GetConstant(); + CPURegister temp; + if (src_cst->IsZeroBitPattern()) { + temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant()) + ? Register(xzr) + : Register(wzr); + } else { + if (src_cst->IsIntConstant()) { + temp = temps.AcquireW(); + } else if (src_cst->IsLongConstant()) { + temp = temps.AcquireX(); + } else if (src_cst->IsFloatConstant()) { + temp = temps.AcquireS(); + } else { + DCHECK(src_cst->IsDoubleConstant()); + temp = temps.AcquireD(); + } + MoveConstant(temp, src_cst); + } + __ Str(temp, StackOperandFrom(destination)); + } else { + DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot()); + DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot()); + UseScratchRegisterScope temps(GetVIXLAssembler()); + // Use any scratch register (a core or a floating-point one) + // from VIXL scratch register pools as a temporary. + // + // We used to only use the FP scratch register pool, but in some + // rare cases the only register from this pool (D31) would + // already be used (e.g. within a ParallelMove instruction, when + // a move is blocked by a another move requiring a scratch FP + // register, which would reserve D31). To prevent this issue, we + // ask for a scratch register of any type (core or FP). + // + // Also, we start by asking for a FP scratch register first, as the + // demand of scratch core registers is higher. 
This is why we + // use AcquireFPOrCoreCPURegisterOfSize instead of + // UseScratchRegisterScope::AcquireCPURegisterOfSize, which + // allocates core scratch registers first. + CPURegister temp = AcquireFPOrCoreCPURegisterOfSize( + GetVIXLAssembler(), + &temps, + (destination.IsDoubleStackSlot() ? kXRegSize : kWRegSize)); + __ Ldr(temp, StackOperandFrom(source)); + __ Str(temp, StackOperandFrom(destination)); + } + } +} + +void CodeGeneratorARM64::Load(DataType::Type type, + CPURegister dst, + const MemOperand& src) { + switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + __ Ldrb(Register(dst), src); + break; + case DataType::Type::kInt8: + __ Ldrsb(Register(dst), src); + break; + case DataType::Type::kUint16: + __ Ldrh(Register(dst), src); + break; + case DataType::Type::kInt16: + __ Ldrsh(Register(dst), src); + break; + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type)); + __ Ldr(dst, src); + break; + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << type; + } +} + +void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction, + CPURegister dst, + const MemOperand& src, + bool needs_null_check) { + MacroAssembler* masm = GetVIXLAssembler(); + UseScratchRegisterScope temps(masm); + Register temp_base = temps.AcquireX(); + DataType::Type type = instruction->GetType(); + + DCHECK(!src.IsPreIndex()); + DCHECK(!src.IsPostIndex()); + + // TODO(vixl): Let the MacroAssembler handle MemOperand. + __ Add(temp_base, src.GetBaseRegister(), OperandFromMemOperand(src)); + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ MemOperand base = MemOperand(temp_base); + switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ ldarb(Register(dst), base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + if (type == DataType::Type::kInt8) { + __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte); + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ ldarh(Register(dst), base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + if (type == DataType::Type::kInt16) { + __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte); + } + break; + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type)); + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ ldar(Register(dst), base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + DCHECK(dst.IsFPRegister()); + DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type)); + + Register temp = dst.Is64Bits() ? 
temps.AcquireX() : temps.AcquireW(); + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ ldar(temp, base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + __ Fmov(VRegister(dst), temp); + break; + } + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << type; + } + } +} + +void CodeGeneratorARM64::Store(DataType::Type type, + CPURegister src, + const MemOperand& dst) { + switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + __ Strb(Register(src), dst); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + __ Strh(Register(src), dst); + break; + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type)); + __ Str(src, dst); + break; + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << type; + } +} + +void CodeGeneratorARM64::StoreRelease(HInstruction* instruction, + DataType::Type type, + CPURegister src, + const MemOperand& dst, + bool needs_null_check) { + MacroAssembler* masm = GetVIXLAssembler(); + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register temp_base = temps.AcquireX(); + + DCHECK(!dst.IsPreIndex()); + DCHECK(!dst.IsPostIndex()); + + // TODO(vixl): Let the MacroAssembler handle this. + Operand op = OperandFromMemOperand(dst); + __ Add(temp_base, dst.GetBaseRegister(), op); + MemOperand base = MemOperand(temp_base); + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ stlrb(Register(src), base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ stlrh(Register(src), base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + break; + case DataType::Type::kInt32: + case DataType::Type::kReference: + case DataType::Type::kInt64: + DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type)); + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ stlr(Register(src), base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type)); + Register temp_src; + if (src.IsZero()) { + // The zero register is used to avoid synthesizing zero constants. + temp_src = Register(src); + } else { + DCHECK(src.IsFPRegister()); + temp_src = src.Is64Bits() ? 
temps.AcquireX() : temps.AcquireW(); + __ Fmov(temp_src, VRegister(src)); + } + { + ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ stlr(temp_src, base); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + } + break; + } + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << type; + } +} + +void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path) { + ValidateInvokeRuntime(entrypoint, instruction, slow_path); + + ThreadOffset64 entrypoint_offset = GetThreadOffset(entrypoint); + // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the + // entire oat file. This adds an extra branch and we do not want to slow down the main path. + // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative. + if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) { + __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value())); + // Ensure the pc position is recorded immediately after the `blr` instruction. + ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize); + __ blr(lr); + if (EntrypointRequiresStackMap(entrypoint)) { + RecordPcInfo(instruction, dex_pc, slow_path); + } + } else { + // Ensure the pc position is recorded immediately after the `bl` instruction. 
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize); + EmitEntrypointThunkCall(entrypoint_offset); + if (EntrypointRequiresStackMap(entrypoint)) { + RecordPcInfo(instruction, dex_pc, slow_path); + } + } +} + +void CodeGeneratorARM64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset, + HInstruction* instruction, + SlowPathCode* slow_path) { + ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path); + __ Ldr(lr, MemOperand(tr, entry_point_offset)); + __ Blr(lr); +} + +void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, + Register class_reg) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register temp = temps.AcquireW(); + constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf(); + const size_t status_byte_offset = + mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte); + constexpr uint32_t shifted_visibly_initialized_value = + enum_cast(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte); + + // CMP (immediate) is limited to imm12 or imm12<<12, so we would need to materialize + // the constant 0xf0000000 for comparison with the full 32-bit field. To reduce the code + // size, load only the high byte of the field and compare with 0xf0. + // Note: The same code size could be achieved with LDR+MNV(asr #24)+CBNZ but benchmarks + // show that this pattern is slower (tested on little cores). 
+ __ Ldrb(temp, HeapOperand(class_reg, status_byte_offset)); + __ Cmp(temp, shifted_visibly_initialized_value); + __ B(lo, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); +} + +void InstructionCodeGeneratorARM64::GenerateBitstringTypeCheckCompare( + HTypeCheckInstruction* check, vixl::aarch64::Register temp) { + uint32_t path_to_root = check->GetBitstringPathToRoot(); + uint32_t mask = check->GetBitstringMask(); + DCHECK(IsPowerOfTwo(mask + 1)); + size_t mask_bits = WhichPowerOf2(mask + 1); + + if (mask_bits == 16u) { + // Load only the bitstring part of the status word. + __ Ldrh(temp, HeapOperand(temp, mirror::Class::StatusOffset())); + } else { + // /* uint32_t */ temp = temp->status_ + __ Ldr(temp, HeapOperand(temp, mirror::Class::StatusOffset())); + // Extract the bitstring bits. + __ Ubfx(temp, temp, 0, mask_bits); + } + // Compare the bitstring bits to `path_to_root`. + __ Cmp(temp, path_to_root); +} + +void CodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) { + BarrierType type = BarrierAll; + + switch (kind) { + case MemBarrierKind::kAnyAny: + case MemBarrierKind::kAnyStore: { + type = BarrierAll; + break; + } + case MemBarrierKind::kLoadAny: { + type = BarrierReads; + break; + } + case MemBarrierKind::kStoreStore: { + type = BarrierWrites; + break; + } + default: + LOG(FATAL) << "Unexpected memory barrier " << kind; + } + __ Dmb(InnerShareable, type); +} + +void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction, + HBasicBlock* successor) { + SuspendCheckSlowPathARM64* slow_path = + down_cast(instruction->GetSlowPath()); + if (slow_path == nullptr) { + slow_path = + new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARM64(instruction, successor); + instruction->SetSlowPath(slow_path); + codegen_->AddSlowPath(slow_path); + if (successor != nullptr) { + DCHECK(successor->IsLoopHeader()); + } + } else { + DCHECK_EQ(slow_path->GetSuccessor(), successor); + } + + UseScratchRegisterScope 
temps(codegen_->GetVIXLAssembler()); + Register temp = temps.AcquireW(); + + __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset().SizeValue())); + if (successor == nullptr) { + __ Cbnz(temp, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetReturnLabel()); + } else { + __ Cbz(temp, codegen_->GetLabelOf(successor)); + __ B(slow_path->GetEntryLabel()); + // slow_path will return to GetLabelOf(successor). + } +} + +InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph, + CodeGeneratorARM64* codegen) + : InstructionCodeGenerator(graph, codegen), + assembler_(codegen->GetAssembler()), + codegen_(codegen) {} + +void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) { + DCHECK_EQ(instr->InputCount(), 2U); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); + DataType::Type type = instr->GetResultType(); + switch (type) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr)); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + + default: + LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type; + } +} + +void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction, + const FieldInfo& field_info) { + DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); + + bool object_field_get_with_read_barrier = + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + 
object_field_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); + if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. + // We need a temporary register for the read barrier load in + // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier() + // only if the field is volatile or the offset is too big. + if (field_info.IsVolatile() || + field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) { + locations->AddTemp(FixedTempLocation()); + } + } + locations->SetInAt(0, Location::RequiresRegister()); + if (DataType::IsFloatingPointType(instruction->GetType())) { + locations->SetOut(Location::RequiresFpuRegister()); + } else { + // The output overlaps for an object field get when read barriers + // are enabled: we do not want the load to overwrite the object's + // location, as we need it to emit the read barrier. + locations->SetOut( + Location::RequiresRegister(), + object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap); + } +} + +void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, + const FieldInfo& field_info) { + DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); + LocationSummary* locations = instruction->GetLocations(); + Location base_loc = locations->InAt(0); + Location out = locations->Out(); + uint32_t offset = field_info.GetFieldOffset().Uint32Value(); + DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType())); + DataType::Type load_type = instruction->GetType(); + MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset()); + + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && + load_type == DataType::Type::kReference) { + // Object FieldGet with Baker's read barrier case. 
+ // /* HeapReference */ out = *(base + offset) + Register base = RegisterFrom(base_loc, DataType::Type::kReference); + Location maybe_temp = + (locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location::NoLocation(); + // Note that potential implicit null checks are handled in this + // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier call. + codegen_->GenerateFieldLoadWithBakerReadBarrier( + instruction, + out, + base, + offset, + maybe_temp, + /* needs_null_check= */ true, + field_info.IsVolatile()); + } else { + // General case. + if (field_info.IsVolatile()) { + // Note that a potential implicit null check is handled in this + // CodeGeneratorARM64::LoadAcquire call. + // NB: LoadAcquire will record the pc info if needed. + codegen_->LoadAcquire( + instruction, OutputCPURegister(instruction), field, /* needs_null_check= */ true); + } else { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + codegen_->Load(load_type, OutputCPURegister(instruction), field); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + if (load_type == DataType::Type::kReference) { + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). 
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, base_loc, offset); + } + } +} + +void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + if (IsConstantZeroBitPattern(instruction->InputAt(1))) { + locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); + } else if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) { + locations->SetInAt(1, Location::RequiresFpuRegister()); + } else { + locations->SetInAt(1, Location::RequiresRegister()); + } +} + +void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction, + const FieldInfo& field_info, + bool value_can_be_null) { + DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); + + Register obj = InputRegisterAt(instruction, 0); + CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 1); + CPURegister source = value; + Offset offset = field_info.GetFieldOffset(); + DataType::Type field_type = field_info.GetFieldType(); + + { + // We use a block to end the scratch scope before the write barrier, thus + // freeing the temporary registers so they can be used in `MarkGCCard`. + UseScratchRegisterScope temps(GetVIXLAssembler()); + + if (kPoisonHeapReferences && field_type == DataType::Type::kReference) { + DCHECK(value.IsW()); + Register temp = temps.AcquireW(); + __ Mov(temp, value.W()); + GetAssembler()->PoisonHeapReference(temp.W()); + source = temp; + } + + if (field_info.IsVolatile()) { + codegen_->StoreRelease( + instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check= */ true); + } else { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + codegen_->Store(field_type, source, HeapOperand(obj, offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + } + + if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { + codegen_->MarkGCCard(obj, Register(value), value_can_be_null); + } +} + +void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) { + DataType::Type type = instr->GetType(); + + switch (type) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + Register dst = OutputRegister(instr); + Register lhs = InputRegisterAt(instr, 0); + Operand rhs = InputOperandAt(instr, 1); + if (instr->IsAdd()) { + __ Add(dst, lhs, rhs); + } else if (instr->IsAnd()) { + __ And(dst, lhs, rhs); + } else if (instr->IsOr()) { + __ Orr(dst, lhs, rhs); + } else if (instr->IsSub()) { + __ Sub(dst, lhs, rhs); + } else if (instr->IsRor()) { + if (rhs.IsImmediate()) { + uint32_t shift = rhs.GetImmediate() & (lhs.GetSizeInBits() - 1); + __ Ror(dst, lhs, shift); + } else { + // Ensure shift distance is in the same size register as the result. If + // we are rotating a long and the shift comes in a w register originally, + // we don't need to sxtw for use as an x since the shift distances are + // all & reg_bits - 1. + __ Ror(dst, lhs, RegisterFrom(instr->GetLocations()->InAt(1), type)); + } + } else if (instr->IsMin() || instr->IsMax()) { + __ Cmp(lhs, rhs); + __ Csel(dst, lhs, rhs, instr->IsMin() ? 
lt : gt); + } else { + DCHECK(instr->IsXor()); + __ Eor(dst, lhs, rhs); + } + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + VRegister dst = OutputFPRegister(instr); + VRegister lhs = InputFPRegisterAt(instr, 0); + VRegister rhs = InputFPRegisterAt(instr, 1); + if (instr->IsAdd()) { + __ Fadd(dst, lhs, rhs); + } else if (instr->IsSub()) { + __ Fsub(dst, lhs, rhs); + } else if (instr->IsMin()) { + __ Fmin(dst, lhs, rhs); + } else if (instr->IsMax()) { + __ Fmax(dst, lhs, rhs); + } else { + LOG(FATAL) << "Unexpected floating-point binary operation"; + } + break; + } + default: + LOG(FATAL) << "Unexpected binary operation type " << type; + } +} + +void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) { + DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr()); + + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); + DataType::Type type = instr->GetResultType(); + switch (type) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + default: + LOG(FATAL) << "Unexpected shift type " << type; + } +} + +void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) { + DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr()); + + DataType::Type type = instr->GetType(); + switch (type) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + Register dst = OutputRegister(instr); + Register lhs = InputRegisterAt(instr, 0); + Operand rhs = InputOperandAt(instr, 1); + if (rhs.IsImmediate()) { + uint32_t shift_value = rhs.GetImmediate() & + (type == DataType::Type::kInt32 ? 
kMaxIntShiftDistance : kMaxLongShiftDistance); + if (instr->IsShl()) { + __ Lsl(dst, lhs, shift_value); + } else if (instr->IsShr()) { + __ Asr(dst, lhs, shift_value); + } else { + __ Lsr(dst, lhs, shift_value); + } + } else { + Register rhs_reg = dst.IsX() ? rhs.GetRegister().X() : rhs.GetRegister().W(); + + if (instr->IsShl()) { + __ Lsl(dst, lhs, rhs_reg); + } else if (instr->IsShr()) { + __ Asr(dst, lhs, rhs_reg); + } else { + __ Lsr(dst, lhs, rhs_reg); + } + } + break; + } + default: + LOG(FATAL) << "Unexpected shift operation type " << type; + } +} + +void LocationsBuilderARM64::VisitAdd(HAdd* instruction) { + HandleBinaryOp(instruction); +} + +void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) { + HandleBinaryOp(instruction); +} + +void LocationsBuilderARM64::VisitAnd(HAnd* instruction) { + HandleBinaryOp(instruction); +} + +void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) { + HandleBinaryOp(instruction); +} + +void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) { + DCHECK(DataType::IsIntegralType(instr->GetType())) << instr->GetType(); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr); + locations->SetInAt(0, Location::RequiresRegister()); + // There is no immediate variant of negated bitwise instructions in AArch64. 
+ locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) { + Register dst = OutputRegister(instr); + Register lhs = InputRegisterAt(instr, 0); + Register rhs = InputRegisterAt(instr, 1); + + switch (instr->GetOpKind()) { + case HInstruction::kAnd: + __ Bic(dst, lhs, rhs); + break; + case HInstruction::kOr: + __ Orn(dst, lhs, rhs); + break; + case HInstruction::kXor: + __ Eon(dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unreachable"; + } +} + +void LocationsBuilderARM64::VisitDataProcWithShifterOp( + HDataProcWithShifterOp* instruction) { + DCHECK(instruction->GetType() == DataType::Type::kInt32 || + instruction->GetType() == DataType::Type::kInt64); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + if (instruction->GetInstrKind() == HInstruction::kNeg) { + locations->SetInAt(0, Location::ConstantLocation(instruction->InputAt(0)->AsConstant())); + } else { + locations->SetInAt(0, Location::RequiresRegister()); + } + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM64::VisitDataProcWithShifterOp( + HDataProcWithShifterOp* instruction) { + DataType::Type type = instruction->GetType(); + HInstruction::InstructionKind kind = instruction->GetInstrKind(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); + Register out = OutputRegister(instruction); + Register left; + if (kind != HInstruction::kNeg) { + left = InputRegisterAt(instruction, 0); + } + // If this `HDataProcWithShifterOp` was created by merging a type conversion as the + // shifter operand operation, the IR generating `right_reg` (input to the type + // conversion) can have a different type from the current 
instruction's type, + // so we manually indicate the type. + Register right_reg = RegisterFrom(instruction->GetLocations()->InAt(1), type); + Operand right_operand(0); + + HDataProcWithShifterOp::OpKind op_kind = instruction->GetOpKind(); + if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) { + right_operand = Operand(right_reg, helpers::ExtendFromOpKind(op_kind)); + } else { + right_operand = Operand(right_reg, + helpers::ShiftFromOpKind(op_kind), + instruction->GetShiftAmount()); + } + + // Logical binary operations do not support extension operations in the + // operand. Note that VIXL would still manage if it was passed by generating + // the extension as a separate instruction. + // `HNeg` also does not support extension. See comments in `ShifterOperandSupportsExtension()`. + DCHECK(!right_operand.IsExtendedRegister() || + (kind != HInstruction::kAnd && kind != HInstruction::kOr && kind != HInstruction::kXor && + kind != HInstruction::kNeg)); + switch (kind) { + case HInstruction::kAdd: + __ Add(out, left, right_operand); + break; + case HInstruction::kAnd: + __ And(out, left, right_operand); + break; + case HInstruction::kNeg: + DCHECK(instruction->InputAt(0)->AsConstant()->IsArithmeticZero()); + __ Neg(out, right_operand); + break; + case HInstruction::kOr: + __ Orr(out, left, right_operand); + break; + case HInstruction::kSub: + __ Sub(out, left, right_operand); + break; + case HInstruction::kXor: + __ Eor(out, left, right_operand); + break; + default: + LOG(FATAL) << "Unexpected operation kind: " << kind; + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction)); + locations->SetOut(Location::RequiresRegister(), 
Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) { + __ Add(OutputRegister(instruction), + InputRegisterAt(instruction, 0), + Operand(InputOperandAt(instruction, 1))); +} + +void LocationsBuilderARM64::VisitIntermediateAddressIndex(HIntermediateAddressIndex* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + + HIntConstant* shift = instruction->GetShift()->AsIntConstant(); + + locations->SetInAt(0, Location::RequiresRegister()); + // For byte case we don't need to shift the index variable so we can encode the data offset into + // ADD instruction. For other cases we prefer the data_offset to be in register; that will hoist + // data offset constant generation out of the loop and reduce the critical path length in the + // loop. + locations->SetInAt(1, shift->GetValue() == 0 + ? Location::ConstantLocation(instruction->GetOffset()->AsIntConstant()) + : Location::RequiresRegister()); + locations->SetInAt(2, Location::ConstantLocation(shift)); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM64::VisitIntermediateAddressIndex( + HIntermediateAddressIndex* instruction) { + Register index_reg = InputRegisterAt(instruction, 0); + uint32_t shift = Int64FromLocation(instruction->GetLocations()->InAt(2)); + uint32_t offset = instruction->GetOffset()->AsIntConstant()->GetValue(); + + if (shift == 0) { + __ Add(OutputRegister(instruction), index_reg, offset); + } else { + Register offset_reg = InputRegisterAt(instruction, 1); + __ Add(OutputRegister(instruction), offset_reg, Operand(index_reg, LSL, shift)); + } +} + +void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall); + HInstruction* accumulator = 
instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex); + if (instr->GetOpKind() == HInstruction::kSub && + accumulator->IsConstant() && + accumulator->AsConstant()->IsArithmeticZero()) { + // Don't allocate register for Mneg instruction. + } else { + locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex, + Location::RequiresRegister()); + } + locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister()); + locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +/* Emits MADD (kAdd), MSUB (kSub), or MNEG (kSub with zero accumulator), with a NOP fixup for Cortex-A53 erratum 835769 when a 64-bit multiply-accumulate would follow a load/store. */ +void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) { + Register res = OutputRegister(instr); + Register mul_left = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulLeftIndex); + Register mul_right = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulRightIndex); + + // Avoid emitting code that could trigger Cortex A53's erratum 835769. + // This fixup should be carried out for all multiply-accumulate instructions: + // madd, msub, smaddl, smsubl, umaddl and umsubl. + if (instr->GetType() == DataType::Type::kInt64 && + codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) { + MacroAssembler* masm = down_cast(codegen_)->GetVIXLAssembler(); + vixl::aarch64::Instruction* prev = + masm->GetCursorAddress() - kInstructionSize; + if (prev->IsLoadOrStore()) { + // Make sure we emit only exactly one nop. 
+ ExactAssemblyScope scope(masm, kInstructionSize, CodeBufferCheckScope::kExactSize); + __ nop(); + } + } + + if (instr->GetOpKind() == HInstruction::kAdd) { + Register accumulator = InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex); + __ Madd(res, mul_left, mul_right, accumulator); + } else { + DCHECK(instr->GetOpKind() == HInstruction::kSub); + HInstruction* accum_instr = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex); + if (accum_instr->IsConstant() && accum_instr->AsConstant()->IsArithmeticZero()) { + __ Mneg(res, mul_left, mul_right); + } else { + Register accumulator = InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex); + __ Msub(res, mul_left, mul_right, accumulator); + } + } +} + +/* Register allocation for array loads; reference loads with read barriers may need a slow path and extra temps for the Baker read-barrier sequences. */ +void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { + bool object_array_get_with_read_barrier = + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); + if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. + if (instruction->GetIndex()->IsConstant()) { + // Array loads with constant index are treated as field loads. + // We need a temporary register for the read barrier load in + // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier() + // only if the offset is too big. 
+ uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction); + uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue(); + offset += index << DataType::SizeShift(DataType::Type::kReference); + if (offset >= kReferenceLoadMinFarOffset) { + locations->AddTemp(FixedTempLocation()); + } + } else if (!instruction->GetArray()->IsIntermediateAddress()) { + // We need a non-scratch temporary for the array data pointer in + // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier() for the case with no + // intermediate address. + locations->AddTemp(Location::RequiresRegister()); + } + } + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (DataType::IsFloatingPointType(instruction->GetType())) { + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + } else { + // The output overlaps in the case of an object array get with + // read barriers enabled: we do not want the move to overwrite the + // array's location, as we need it to emit the read barrier. + locations->SetOut( + Location::RequiresRegister(), + object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap); + } +} + +/* Emits an array element load: Baker read-barrier path for references, otherwise a plain load, with special handling for compressed strings (charAt) and HIntermediateAddress inputs. */ +void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { + DataType::Type type = instruction->GetType(); + Register obj = InputRegisterAt(instruction, 0); + LocationSummary* locations = instruction->GetLocations(); + Location index = locations->InAt(1); + Location out = locations->Out(); + uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction); + const bool maybe_compressed_char_at = mirror::kUseStringCompression && + instruction->IsStringCharAt(); + MacroAssembler* masm = GetVIXLAssembler(); + UseScratchRegisterScope temps(masm); + + // The non-Baker read barrier instrumentation of object ArrayGet instructions + // does not support the HIntermediateAddress instruction. 
+ DCHECK(!((type == DataType::Type::kReference) && + instruction->GetArray()->IsIntermediateAddress() && + kEmitCompilerReadBarrier && + !kUseBakerReadBarrier)); + + if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // Object ArrayGet with Baker's read barrier case. + // Note that a potential implicit null check is handled in the + // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call. + DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0))); + if (index.IsConstant()) { + DCHECK(!instruction->GetArray()->IsIntermediateAddress()); + // Array load with a constant index can be treated as a field load. + offset += Int64FromLocation(index) << DataType::SizeShift(type); + Location maybe_temp = + (locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location::NoLocation(); + codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction, + out, + obj.W(), + offset, + maybe_temp, + /* needs_null_check= */ false, + /* use_load_acquire= */ false); + } else { + codegen_->GenerateArrayLoadWithBakerReadBarrier( + instruction, out, obj.W(), offset, index, /* needs_null_check= */ false); + } + } else { + // General case. + MemOperand source = HeapOperand(obj); + Register length; + if (maybe_compressed_char_at) { + uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + length = temps.AcquireW(); + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + + if (instruction->GetArray()->IsIntermediateAddress()) { + DCHECK_LT(count_offset, offset); + int64_t adjusted_offset = + static_cast(count_offset) - static_cast(offset); + // Note that `adjusted_offset` is negative, so this will be a LDUR. 
+ __ Ldr(length, MemOperand(obj.X(), adjusted_offset)); + } else { + __ Ldr(length, HeapOperand(obj, count_offset)); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + } + if (index.IsConstant()) { + if (maybe_compressed_char_at) { + vixl::aarch64::Label uncompressed_load, done; + static_assert(static_cast(mirror::StringCompressionFlag::kCompressed) == 0u, + "Expecting 0=compressed, 1=uncompressed"); + __ Tbnz(length.W(), 0, &uncompressed_load); + __ Ldrb(Register(OutputCPURegister(instruction)), + HeapOperand(obj, offset + Int64FromLocation(index))); + __ B(&done); + __ Bind(&uncompressed_load); + __ Ldrh(Register(OutputCPURegister(instruction)), + HeapOperand(obj, offset + (Int64FromLocation(index) << 1))); + __ Bind(&done); + } else { + offset += Int64FromLocation(index) << DataType::SizeShift(type); + source = HeapOperand(obj, offset); + } + } else { + Register temp = temps.AcquireSameSizeAs(obj); + if (instruction->GetArray()->IsIntermediateAddress()) { + // We do not need to compute the intermediate address from the array: the + // input instruction has done it already. See the comment in + // `TryExtractArrayAccessAddress()`. 
+ if (kIsDebugBuild) { + HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress(); + DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset); + } + temp = obj; + } else { + __ Add(temp, obj, offset); + } + if (maybe_compressed_char_at) { + vixl::aarch64::Label uncompressed_load, done; + static_assert(static_cast(mirror::StringCompressionFlag::kCompressed) == 0u, + "Expecting 0=compressed, 1=uncompressed"); + __ Tbnz(length.W(), 0, &uncompressed_load); + __ Ldrb(Register(OutputCPURegister(instruction)), + HeapOperand(temp, XRegisterFrom(index), LSL, 0)); + __ B(&done); + __ Bind(&uncompressed_load); + __ Ldrh(Register(OutputCPURegister(instruction)), + HeapOperand(temp, XRegisterFrom(index), LSL, 1)); + __ Bind(&done); + } else { + source = HeapOperand(temp, XRegisterFrom(index), LSL, DataType::SizeShift(type)); + } + } + if (!maybe_compressed_char_at) { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + codegen_->Load(type, OutputCPURegister(instruction), source); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + + if (type == DataType::Type::kReference) { + static_assert( + sizeof(mirror::HeapReference) == sizeof(int32_t), + "art::mirror::HeapReference and int32_t have different sizes."); + Location obj_loc = locations->InAt(0); + if (index.IsConstant()) { + codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, obj_loc, offset); + } else { + codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, obj_loc, offset, index); + } + } + } +} + +void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +/* Loads the array length field; for String.length() the low compression-flag bit is shifted out. */ +void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) { + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); + vixl::aarch64::Register out = OutputRegister(instruction); + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + __ Ldr(out, HeapOperand(InputRegisterAt(instruction, 0), offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + // Mask out compression flag from String's array length. + if (mirror::kUseStringCompression && instruction->IsStringLength()) { + __ Lsr(out.W(), out.W(), 1u); + } +} + +/* Register allocation for array stores; a slow path is reserved only when a runtime type check is needed. */ +void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) { + DataType::Type value_type = instruction->GetComponentType(); + + bool needs_type_check = instruction->NeedsTypeCheck(); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, + needs_type_check ? 
LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (IsConstantZeroBitPattern(instruction->InputAt(2))) { + locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant())); + } else if (DataType::IsFloatingPointType(value_type)) { + locations->SetInAt(2, Location::RequiresFpuRegister()); + } else { + locations->SetInAt(2, Location::RequiresRegister()); + } +} + +/* Emits an array element store; the write-barrier path adds an optional type check (with ArraySetSlowPathARM64), GC card marking, and heap-reference poisoning. */ +void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { + DataType::Type value_type = instruction->GetComponentType(); + LocationSummary* locations = instruction->GetLocations(); + bool needs_type_check = instruction->NeedsTypeCheck(); + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); + + Register array = InputRegisterAt(instruction, 0); + CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 2); + CPURegister source = value; + Location index = locations->InAt(1); + size_t offset = mirror::Array::DataOffset(DataType::Size(value_type)).Uint32Value(); + MemOperand destination = HeapOperand(array); + MacroAssembler* masm = GetVIXLAssembler(); + + if (!needs_write_barrier) { + DCHECK(!needs_type_check); + if (index.IsConstant()) { + offset += Int64FromLocation(index) << DataType::SizeShift(value_type); + destination = HeapOperand(array, offset); + } else { + UseScratchRegisterScope temps(masm); + Register temp = temps.AcquireSameSizeAs(array); + if (instruction->GetArray()->IsIntermediateAddress()) { + // We do not need to compute the intermediate address from the array: the + // input instruction has done it already. See the comment in + // `TryExtractArrayAccessAddress()`. 
+ if (kIsDebugBuild) { + HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress(); + DCHECK(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset); + } + temp = array; + } else { + __ Add(temp, array, offset); + } + destination = HeapOperand(temp, + XRegisterFrom(index), + LSL, + DataType::SizeShift(value_type)); + } + { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + codegen_->Store(value_type, value, destination); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + } else { + DCHECK(!instruction->GetArray()->IsIntermediateAddress()); + + bool can_value_be_null = instruction->GetValueCanBeNull(); + vixl::aarch64::Label do_store; + if (can_value_be_null) { + __ Cbz(Register(value), &do_store); + } + + SlowPathCodeARM64* slow_path = nullptr; + if (needs_type_check) { + slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARM64(instruction); + codegen_->AddSlowPath(slow_path); + + const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); + const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); + + UseScratchRegisterScope temps(masm); + Register temp = temps.AcquireSameSizeAs(array); + Register temp2 = temps.AcquireSameSizeAs(array); + + // Note that when Baker read barriers are enabled, the type + // checks are performed without read barriers. This is fine, + // even in the case where a class object is in the from-space + // after the flip, as a comparison involving such a type would + // not produce a false positive; it may of course produce a + // false negative, in which case we would take the ArraySet + // slow path. 
+ + // /* HeapReference */ temp = array->klass_ + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + __ Ldr(temp, HeapOperand(array, class_offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + GetAssembler()->MaybeUnpoisonHeapReference(temp); + + // /* HeapReference */ temp = temp->component_type_ + __ Ldr(temp, HeapOperand(temp, component_offset)); + // /* HeapReference */ temp2 = value->klass_ + __ Ldr(temp2, HeapOperand(Register(value), class_offset)); + // If heap poisoning is enabled, no need to unpoison `temp` + // nor `temp2`, as we are comparing two poisoned references. + __ Cmp(temp, temp2); + + if (instruction->StaticTypeOfArrayIsObjectArray()) { + vixl::aarch64::Label do_put; + __ B(eq, &do_put); + // If heap poisoning is enabled, the `temp` reference has + // not been unpoisoned yet; unpoison it now. + GetAssembler()->MaybeUnpoisonHeapReference(temp); + + // /* HeapReference */ temp = temp->super_class_ + __ Ldr(temp, HeapOperand(temp, super_offset)); + // If heap poisoning is enabled, no need to unpoison + // `temp`, as we are comparing against null below. 
+ __ Cbnz(temp, slow_path->GetEntryLabel()); + __ Bind(&do_put); + } else { + __ B(ne, slow_path->GetEntryLabel()); + } + } + + codegen_->MarkGCCard(array, value.W(), /* value_can_be_null= */ false); + + if (can_value_be_null) { + DCHECK(do_store.IsLinked()); + __ Bind(&do_store); + } + + UseScratchRegisterScope temps(masm); + if (kPoisonHeapReferences) { + Register temp_source = temps.AcquireSameSizeAs(array); + DCHECK(value.IsW()); + __ Mov(temp_source, value.W()); + GetAssembler()->PoisonHeapReference(temp_source); + source = temp_source; + } + + if (index.IsConstant()) { + offset += Int64FromLocation(index) << DataType::SizeShift(value_type); + destination = HeapOperand(array, offset); + } else { + Register temp_base = temps.AcquireSameSizeAs(array); + __ Add(temp_base, array, offset); + destination = HeapOperand(temp_base, + XRegisterFrom(index), + LSL, + DataType::SizeShift(value_type)); + } + + { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + __ Str(source, destination); + + if (can_value_be_null || !needs_type_check) { + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + } + + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } + } +} + +/* Register allocation for bounds checks; runtime calling-convention registers are the only caller-saves the slow path needs. */ +void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) { + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConvention calling_convention; + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1).GetCode())); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); + + // If both index and length are constant, we can check the bounds statically and + // generate code accordingly. 
We want to make sure we generate constant locations + // in that case, regardless of whether they are encodable in the comparison or not. + HInstruction* index = instruction->InputAt(0); + HInstruction* length = instruction->InputAt(1); + bool both_const = index->IsConstant() && length->IsConstant(); + locations->SetInAt(0, both_const + ? Location::ConstantLocation(index->AsConstant()) + : ARM64EncodableConstantOrRegister(index, instruction)); + locations->SetInAt(1, both_const + ? Location::ConstantLocation(length->AsConstant()) + : ARM64EncodableConstantOrRegister(length, instruction)); +} + +/* Emits the bounds check: fully static when both operands are constant, otherwise CMP + conditional branch to the slow path (operands swapped and condition commuted when only the index is constant). */ +void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Location index_loc = locations->InAt(0); + Location length_loc = locations->InAt(1); + + int cmp_first_input = 0; + int cmp_second_input = 1; + Condition cond = hs; + + if (index_loc.IsConstant()) { + int64_t index = Int64FromLocation(index_loc); + if (length_loc.IsConstant()) { + int64_t length = Int64FromLocation(length_loc); + if (index < 0 || index >= length) { + BoundsCheckSlowPathARM64* slow_path = + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction); + codegen_->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel()); + } else { + // BCE will remove the bounds check if we are guaranteed to pass. + // However, some optimization after BCE may have generated this, and we should not + // generate a bounds check if it is a valid range. + } + return; + } + // Only the index is constant: change the order of the operands and commute the condition + // so we can use an immediate constant for the index (only the second input to a cmp + // instruction can be an immediate). 
+ cmp_first_input = 1; + cmp_second_input = 0; + cond = ls; + } + BoundsCheckSlowPathARM64* slow_path = + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction); + __ Cmp(InputRegisterAt(instruction, cmp_first_input), + InputOperandAt(instruction, cmp_second_input)); + codegen_->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel(), cond); +} + +void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + locations->SetInAt(0, Location::RequiresRegister()); + if (check->HasUses()) { + locations->SetOut(Location::SameAsFirstInput()); + } + // Rely on the type initialization to save everything we need. + locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); +} + +/* Emits a class-initialization check with LoadClassSlowPathARM64 as the slow path. */ +void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) { + // We assume the class is not null. + SlowPathCodeARM64* slow_path = + new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(check->GetLoadClass(), check); + codegen_->AddSlowPath(slow_path); + GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0)); +} + +/* True iff `inst` is a float or double constant whose value is arithmetic zero (either sign). */ +static bool IsFloatingPointZeroConstant(HInstruction* inst) { + return (inst->IsFloatConstant() && (inst->AsFloatConstant()->IsArithmeticZero())) + || (inst->IsDoubleConstant() && (inst->AsDoubleConstant()->IsArithmeticZero())); +} + +/* Emits FCMP against a register, or against the immediate 0.0 when the RHS location is a (zero) constant. */ +void InstructionCodeGeneratorARM64::GenerateFcmp(HInstruction* instruction) { + VRegister lhs_reg = InputFPRegisterAt(instruction, 0); + Location rhs_loc = instruction->GetLocations()->InAt(1); + if (rhs_loc.IsConstant()) { + // 0.0 is the only immediate that can be encoded directly in + // an FCMP instruction. + // + // Both the JLS (section 15.20.1) and the JVMS (section 6.5) + // specify that in a floating-point comparison, positive zero + // and negative zero are considered equal, so we can use the + // literal 0.0 for both cases here. 
+ // + // Note however that some methods (Float.equal, Float.compare, + // Float.compareTo, Double.equal, Double.compare, + // Double.compareTo, Math.max, Math.min, StrictMath.max, + // StrictMath.min) consider 0.0 to be (strictly) greater than + // -0.0. So if we ever translate calls to these methods into a + // HCompare instruction, we must handle the -0.0 case with + // care here. + DCHECK(IsFloatingPointZeroConstant(rhs_loc.GetConstant())); + __ Fcmp(lhs_reg, 0.0); + } else { + __ Fcmp(lhs_reg, InputFPRegisterAt(instruction, 1)); + } +} + +/* Register allocation for HCompare: GP registers for integral types, FP registers (or the 0.0 constant) for float/double. */ +void LocationsBuilderARM64::VisitCompare(HCompare* compare) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall); + DataType::Type in_type = compare->InputAt(0)->GetType(); + switch (in_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare)); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, + IsFloatingPointZeroConstant(compare->InputAt(1)) + ? 
Location::ConstantLocation(compare->InputAt(1)->AsConstant()) + : Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); + break; + } + default: + LOG(FATAL) << "Unexpected type for compare operation " << in_type; + } +} + +/* Emits CMP/FCMP then CSET + CNEG to materialize -1/0/+1. */ +void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) { + DataType::Type in_type = compare->InputAt(0)->GetType(); + + // 0 if: left == right + // 1 if: left > right + // -1 if: left < right + switch (in_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + Register result = OutputRegister(compare); + Register left = InputRegisterAt(compare, 0); + Operand right = InputOperandAt(compare, 1); + __ Cmp(left, right); + __ Cset(result, ne); // result == +1 if NE or 0 otherwise + __ Cneg(result, result, lt); // result == -1 if LT or unchanged otherwise + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + Register result = OutputRegister(compare); + GenerateFcmp(compare); + __ Cset(result, ne); + __ Cneg(result, result, ARM64FPCondition(kCondLT, compare->IsGtBias())); + break; + } + default: + LOG(FATAL) << "Unimplemented compare type " << in_type; + } +} + +/* Shared register allocation for all HCondition instructions. */ +void LocationsBuilderARM64::HandleCondition(HCondition* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + + if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, + IsFloatingPointZeroConstant(instruction->InputAt(1)) + ? Location::ConstantLocation(instruction->InputAt(1)->AsConstant()) + : Location::RequiresFpuRegister()); + } else { + // Integer cases. 
+ locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction)); + } + + if (!instruction->IsEmittedAtUseSite()) { + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } +} + +/* Materializes a condition into a register (CSET after CMP/FCMP); no-op when the condition is emitted at its use site. */ +void InstructionCodeGeneratorARM64::HandleCondition(HCondition* instruction) { + if (instruction->IsEmittedAtUseSite()) { + return; + } + + LocationSummary* locations = instruction->GetLocations(); + Register res = RegisterFrom(locations->Out(), instruction->GetType()); + IfCondition if_cond = instruction->GetCondition(); + + if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) { + GenerateFcmp(instruction); + __ Cset(res, ARM64FPCondition(if_cond, instruction->IsGtBias())); + } else { + // Integer cases. + Register lhs = InputRegisterAt(instruction, 0); + Operand rhs = InputOperandAt(instruction, 1); + __ Cmp(lhs, rhs); + __ Cset(res, ARM64Condition(if_cond)); + } +} + +/* Expands a Visit##Name pair (locations + codegen) for each comparison instruction, all forwarding to HandleCondition. */ +#define FOR_EACH_CONDITION_INSTRUCTION(M) \ + M(Equal) \ + M(NotEqual) \ + M(LessThan) \ + M(LessThanOrEqual) \ + M(GreaterThan) \ + M(GreaterThanOrEqual) \ + M(Below) \ + M(BelowOrEqual) \ + M(Above) \ + M(AboveOrEqual) +#define DEFINE_CONDITION_VISITORS(Name) \ +void LocationsBuilderARM64::Visit##Name(H##Name* comp) { HandleCondition(comp); } \ +void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { HandleCondition(comp); } +FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS) +#undef DEFINE_CONDITION_VISITORS +#undef FOR_EACH_CONDITION_INSTRUCTION + +/* Signed division by a power-of-two constant: round-toward-zero adjust then ASR, negated (NEG of ASR) for a negative divisor. */ +void InstructionCodeGeneratorARM64::GenerateIntDivForPower2Denom(HDiv* instruction) { + int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1)); + uint64_t abs_imm = static_cast(AbsOrMin(imm)); + DCHECK(IsPowerOfTwo(abs_imm)) << abs_imm; + + Register out = OutputRegister(instruction); + Register dividend = InputRegisterAt(instruction, 0); + + if (abs_imm == 2) { + int bits = 
DataType::Size(instruction->GetResultType()) * kBitsPerByte; + __ Add(out, dividend, Operand(dividend, LSR, bits - 1)); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register temp = temps.AcquireSameSizeAs(out); + __ Add(temp, dividend, abs_imm - 1); + __ Cmp(dividend, 0); + __ Csel(out, temp, dividend, lt); + } + + int ctz_imm = CTZ(abs_imm); + if (imm > 0) { + __ Asr(out, out, ctz_imm); + } else { + __ Neg(out, Operand(out, ASR, ctz_imm)); + } +} + +/* Division/remainder by an arbitrary non-power-of-two constant via the magic-number multiply (SMULH/SMULL) and shift technique. */ +void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) { + DCHECK(instruction->IsDiv() || instruction->IsRem()); + + LocationSummary* locations = instruction->GetLocations(); + Location second = locations->InAt(1); + DCHECK(second.IsConstant()); + + Register out = OutputRegister(instruction); + Register dividend = InputRegisterAt(instruction, 0); + int64_t imm = Int64FromConstant(second.GetConstant()); + + DataType::Type type = instruction->GetResultType(); + DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); + + int64_t magic; + int shift; + CalculateMagicAndShiftForDivRem( + imm, /* is_long= */ type == DataType::Type::kInt64, &magic, &shift); + + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register temp = temps.AcquireSameSizeAs(out); + + // temp = get_high(dividend * magic) + __ Mov(temp, magic); + if (type == DataType::Type::kInt64) { + __ Smulh(temp, dividend, temp); + } else { + __ Smull(temp.X(), dividend, temp); + __ Lsr(temp.X(), temp.X(), 32); + } + + if (imm > 0 && magic < 0) { + __ Add(temp, temp, dividend); + } else if (imm < 0 && magic > 0) { + __ Sub(temp, temp, dividend); + } + + if (shift != 0) { + __ Asr(temp, temp, shift); + } + + if (instruction->IsDiv()) { + __ Sub(out, temp, Operand(temp, ASR, type == DataType::Type::kInt64 ? 63 : 31)); + } else { + __ Sub(temp, temp, Operand(temp, ASR, type == DataType::Type::kInt64 ? 63 : 31)); + // TODO: Strength reduction for msub. 
+ Register temp_imm = temps.AcquireSameSizeAs(out); + __ Mov(temp_imm, imm); + __ Msub(out, temp, temp_imm, dividend); + } +} + +/* Dispatches constant-divisor division: nothing for 0 (DivZeroCheck throws first), power-of-two fast path, otherwise the magic-number path. */ +void InstructionCodeGeneratorARM64::GenerateIntDivForConstDenom(HDiv *instruction) { + int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1)); + + if (imm == 0) { + // Do not generate anything. DivZeroCheck would prevent any code to be executed. + return; + } + + if (IsPowerOfTwo(AbsOrMin(imm))) { + GenerateIntDivForPower2Denom(instruction); + } else { + // Cases imm == -1 or imm == 1 are handled by InstructionSimplifier. + DCHECK(imm < -2 || imm > 2) << imm; + GenerateDivRemWithAnyConstant(instruction); + } +} + +/* Integer division: constant divisors go through the const-denominator helpers, otherwise SDIV. */ +void InstructionCodeGeneratorARM64::GenerateIntDiv(HDiv *instruction) { + DCHECK(DataType::IsIntOrLongType(instruction->GetResultType())) + << instruction->GetResultType(); + + if (instruction->GetLocations()->InAt(1).IsConstant()) { + GenerateIntDivForConstDenom(instruction); + } else { + Register out = OutputRegister(instruction); + Register dividend = InputRegisterAt(instruction, 0); + Register divisor = InputRegisterAt(instruction, 1); + __ Sdiv(out, dividend, divisor); + } +} + +void LocationsBuilderARM64::VisitDiv(HDiv* div) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall); + switch (div->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + + default: + LOG(FATAL) << "Unexpected div type " << div->GetResultType(); + } +} + +void 
InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
  DataType::Type type = div->GetResultType();
  switch (type) {
    case DataType::Type::kInt32:
    case DataType::Type::kInt64:
      GenerateIntDiv(div);
      break;

    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64:
      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

// HDivZeroCheck only needs its input; a constant divisor lets us resolve
// the check at compile time.
void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
}

// Emits the divide-by-zero check: branch to the throwing slow path when
// the divisor is (statically or dynamically) zero.
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeARM64* slow_path =
      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  DataType::Type type = instruction->GetType();

  if (!DataType::IsIntegralType(type)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    UNREACHABLE();
  }

  if (value.IsConstant()) {
    int64_t divisor = Int64FromLocation(value);
    if (divisor == 0) {
      // Statically known zero divisor: unconditionally take the slow path.
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-null constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitDoubleConstant(
    HDoubleConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

// HExit generates no code.
void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

// Common lowering for unconditional control transfers (HGoto and
// HTryBoundary): emits suspend checks on loop back edges and skips the
// branch when the successor is the next block in emission order.
void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  if (successor->IsExitBlock()) {
    DCHECK(got->GetPrevious()->AlwaysThrows());
    return;  // no code needed
  }

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Loop back edge: bump the hotness counter and emit the suspend check.
    codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
HandleGoto(try_boundary, successor); + } +} + +void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction, + size_t condition_input_index, + vixl::aarch64::Label* true_target, + vixl::aarch64::Label* false_target) { + HInstruction* cond = instruction->InputAt(condition_input_index); + + if (true_target == nullptr && false_target == nullptr) { + // Nothing to do. The code always falls through. + return; + } else if (cond->IsIntConstant()) { + // Constant condition, statically compared against "true" (integer value 1). + if (cond->AsIntConstant()->IsTrue()) { + if (true_target != nullptr) { + __ B(true_target); + } + } else { + DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue(); + if (false_target != nullptr) { + __ B(false_target); + } + } + return; + } + + // The following code generates these patterns: + // (1) true_target == nullptr && false_target != nullptr + // - opposite condition true => branch to false_target + // (2) true_target != nullptr && false_target == nullptr + // - condition true => branch to true_target + // (3) true_target != nullptr && false_target != nullptr + // - condition true => branch to true_target + // - branch to false_target + if (IsBooleanValueOrMaterializedCondition(cond)) { + // The condition instruction has been materialized, compare the output to 0. + Location cond_val = instruction->GetLocations()->InAt(condition_input_index); + DCHECK(cond_val.IsRegister()); + if (true_target == nullptr) { + __ Cbz(InputRegisterAt(instruction, condition_input_index), false_target); + } else { + __ Cbnz(InputRegisterAt(instruction, condition_input_index), true_target); + } + } else { + // The condition instruction has not been materialized, use its inputs as + // the comparison and its condition as the branch condition. 
+ HCondition* condition = cond->AsCondition(); + + DataType::Type type = condition->InputAt(0)->GetType(); + if (DataType::IsFloatingPointType(type)) { + GenerateFcmp(condition); + if (true_target == nullptr) { + IfCondition opposite_condition = condition->GetOppositeCondition(); + __ B(ARM64FPCondition(opposite_condition, condition->IsGtBias()), false_target); + } else { + __ B(ARM64FPCondition(condition->GetCondition(), condition->IsGtBias()), true_target); + } + } else { + // Integer cases. + Register lhs = InputRegisterAt(condition, 0); + Operand rhs = InputOperandAt(condition, 1); + + Condition arm64_cond; + vixl::aarch64::Label* non_fallthrough_target; + if (true_target == nullptr) { + arm64_cond = ARM64Condition(condition->GetOppositeCondition()); + non_fallthrough_target = false_target; + } else { + arm64_cond = ARM64Condition(condition->GetCondition()); + non_fallthrough_target = true_target; + } + + if ((arm64_cond == eq || arm64_cond == ne || arm64_cond == lt || arm64_cond == ge) && + rhs.IsImmediate() && (rhs.GetImmediate() == 0)) { + switch (arm64_cond) { + case eq: + __ Cbz(lhs, non_fallthrough_target); + break; + case ne: + __ Cbnz(lhs, non_fallthrough_target); + break; + case lt: + // Test the sign bit and branch accordingly. + __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, non_fallthrough_target); + break; + case ge: + // Test the sign bit and branch accordingly. + __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, non_fallthrough_target); + break; + default: + // Without the `static_cast` the compiler throws an error for + // `-Werror=sign-promo`. + LOG(FATAL) << "Unexpected condition: " << static_cast(arm64_cond); + } + } else { + __ Cmp(lhs, rhs); + __ B(arm64_cond, non_fallthrough_target); + } + } + } + + // If neither branch falls through (case 3), the conditional branch to `true_target` + // was already emitted (case 2) and we need to emit a jump to `false_target`. 
+ if (true_target != nullptr && false_target != nullptr) { + __ B(false_target); + } +} + +void LocationsBuilderARM64::VisitIf(HIf* if_instr) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr); + if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { + locations->SetInAt(0, Location::RequiresRegister()); + } +} + +void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) { + HBasicBlock* true_successor = if_instr->IfTrueSuccessor(); + HBasicBlock* false_successor = if_instr->IfFalseSuccessor(); + vixl::aarch64::Label* true_target = codegen_->GetLabelOf(true_successor); + if (codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor)) { + true_target = nullptr; + } + vixl::aarch64::Label* false_target = codegen_->GetLabelOf(false_successor); + if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) { + false_target = nullptr; + } + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); +} + +void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) + LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); + InvokeRuntimeCallingConvention calling_convention; + RegisterSet caller_saves = RegisterSet::Empty(); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); + locations->SetCustomSlowPathCallerSaves(caller_saves); + if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { + locations->SetInAt(0, Location::RequiresRegister()); + } +} + +void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) { + SlowPathCodeARM64* slow_path = + deopt_slow_paths_.NewSlowPath(deoptimize); + GenerateTestAndBranch(deoptimize, + /* condition_input_index= */ 0, + slow_path->GetEntryLabel(), + /* false_target= */ nullptr); +} + +void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { + 
  LocationSummary* locations = new (GetGraph()->GetAllocator())
      LocationSummary(flag, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

// Loads the should-deoptimize flag from its dedicated stack slot.
void InstructionCodeGeneratorARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
  __ Ldr(OutputRegister(flag),
         MemOperand(sp, codegen_->GetStackOffsetOfShouldDeoptimizeFlag()));
}

// True when `condition` is an HCondition comparing floating-point values.
static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) {
  return condition->IsCondition() &&
         DataType::IsFloatingPointType(condition->InputAt(0)->GetType());
}

// Maps an HCondition to the ARM64 condition code used by CSEL/FCSEL.
static inline Condition GetConditionForSelect(HCondition* condition) {
  IfCondition cond = condition->AsCondition()->GetCondition();
  return IsConditionOnFloatingPointValues(condition) ? ARM64FPCondition(cond, condition->IsGtBias())
                                                     : ARM64Condition(cond);
}

// Register constraints for HSelect. For integer selects, VIXL is consulted
// to decide whether constant operands are better synthesized in registers.
void LocationsBuilderARM64::VisitSelect(HSelect* select) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
  if (DataType::IsFloatingPointType(select->GetType())) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
    locations->SetInAt(1, Location::RequiresFpuRegister());
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
    HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
    bool is_true_value_constant = cst_true_value != nullptr;
    bool is_false_value_constant = cst_false_value != nullptr;
    // Ask VIXL whether we should synthesize constants in registers.
    // We give an arbitrary register to VIXL when dealing with non-constant inputs.
    Operand true_op = is_true_value_constant ?
        Operand(Int64FromConstant(cst_true_value)) : Operand(x1);
    Operand false_op = is_false_value_constant ?
        Operand(Int64FromConstant(cst_false_value)) : Operand(x2);
    bool true_value_in_register = false;
    bool false_value_in_register = false;
    MacroAssembler::GetCselSynthesisInformation(
        x0, true_op, false_op, &true_value_in_register, &false_value_in_register);
    true_value_in_register |= !is_true_value_constant;
    false_value_in_register |= !is_false_value_constant;

    // Input 1 is the true value, input 0 the false value.
    locations->SetInAt(1, true_value_in_register ? Location::RequiresRegister()
                                                 : Location::ConstantLocation(cst_true_value));
    locations->SetInAt(0, false_value_in_register ? Location::RequiresRegister()
                                                  : Location::ConstantLocation(cst_false_value));
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }

  if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
    locations->SetInAt(2, Location::RequiresRegister());
  }
}

// Lowers HSelect to CSEL/FCSEL, reusing condition flags from the previous
// instruction when the condition immediately precedes the select.
void InstructionCodeGeneratorARM64::VisitSelect(HSelect* select) {
  HInstruction* cond = select->GetCondition();
  Condition csel_cond;

  if (IsBooleanValueOrMaterializedCondition(cond)) {
    if (cond->IsCondition() && cond->GetNext() == select) {
      // Use the condition flags set by the previous instruction.
      csel_cond = GetConditionForSelect(cond->AsCondition());
    } else {
      // Materialized boolean: compare against 0 and select on NE.
      __ Cmp(InputRegisterAt(select, 2), 0);
      csel_cond = ne;
    }
  } else if (IsConditionOnFloatingPointValues(cond)) {
    GenerateFcmp(cond);
    csel_cond = GetConditionForSelect(cond->AsCondition());
  } else {
    __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
    csel_cond = GetConditionForSelect(cond->AsCondition());
  }

  if (DataType::IsFloatingPointType(select->GetType())) {
    __ Fcsel(OutputFPRegister(select),
             InputFPRegisterAt(select, 1),
             InputFPRegisterAt(select, 0),
             csel_cond);
  } else {
    __ Csel(OutputRegister(select),
            InputOperandAt(select, 1),
            InputOperandAt(select, 0),
            csel_cond);
  }
}

void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
  new (GetGraph()->GetAllocator()) LocationSummary(info);
}

void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo*) {
  // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
}

void CodeGeneratorARM64::GenerateNop() {
  __ Nop();
}

void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction);
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}

// Temp is used for read barrier.
// Returns how many temporary registers HInstanceOf needs for the given
// check kind (at most one, used by the read barrier).
static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
  if (kEmitCompilerReadBarrier &&
      (kUseBakerReadBarrier ||
       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
       type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
    return 1;
  }
  return 0;
}

// Interface case has 3 temps, one for holding the number of interfaces, one for the current
// interface pointer, one for loading the current interface.
// The other checks have one temp for loading the object's class.
static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
  if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
    return 3;
  }
  return 1 + NumberOfInstanceOfTemps(type_check_kind);
}

// Register constraints for HInstanceOf; the call kind depends on whether
// the check kind can require a read-barrier or type-check slow path.
void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
  bool baker_read_barrier_slow_path = false;
  switch (type_check_kind) {
    case TypeCheckKind::kExactCheck:
    case TypeCheckKind::kAbstractClassCheck:
    case TypeCheckKind::kClassHierarchyCheck:
    case TypeCheckKind::kArrayObjectCheck: {
      bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
      call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
      baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
      break;
    }
    case TypeCheckKind::kArrayCheck:
    case TypeCheckKind::kUnresolvedCheck:
    case TypeCheckKind::kInterfaceCheck:
      call_kind = LocationSummary::kCallOnSlowPath;
      break;
    case TypeCheckKind::kBitstringCheck:
      break;
  }

  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  if (baker_read_barrier_slow_path) {
    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
  }
  locations->SetInAt(0, Location::RequiresRegister());
  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
    // Bitstring checks encode the path-to-root/mask/expected-bits as constants.
    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
  // The "out" register is used as a temporary, so it overlaps with the inputs.
  // Note that TypeCheckSlowPathARM64 uses this register too.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
  // Add temps if necessary for read barriers.
  locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
}

// Lowers HInstanceOf: materializes 0/1 into `out` depending on whether the
// object's class satisfies the check.
void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
  LocationSummary* locations = instruction->GetLocations();
  Location obj_loc = locations->InAt(0);
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = (type_check_kind == TypeCheckKind::kBitstringCheck)
      ? Register()
      : InputRegisterAt(instruction, 1);
  Location out_loc = locations->Out();
  Register out = OutputRegister(instruction);
  const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
  DCHECK_LE(num_temps, 1u);
  Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();

  vixl::aarch64::Label done, zero;
  SlowPathCodeARM64* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // Avoid null check if we know `obj` is not null.
  if (instruction->MustDoNullCheck()) {
    __ Cbz(obj, &zero);
  }

  switch (type_check_kind) {
    case TypeCheckKind::kExactCheck: {
      ReadBarrierOption read_barrier_option =
          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
      // /* HeapReference<Class> */ out = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        out_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp_loc,
                                        read_barrier_option);
      __ Cmp(out, cls);
      __ Cset(out, eq);
      if (zero.IsLinked()) {
        __ B(&done);
      }
      break;
    }

    case TypeCheckKind::kAbstractClassCheck: {
      ReadBarrierOption read_barrier_option =
          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
      // /* HeapReference<Class> */ out = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        out_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp_loc,
                                        read_barrier_option);
      // If the class is abstract, we eagerly fetch the super class of the
      // object to avoid doing a comparison we know will fail.
      vixl::aarch64::Label loop, success;
      __ Bind(&loop);
      // /* HeapReference<Class> */ out = out->super_class_
      GenerateReferenceLoadOneRegister(instruction,
                                       out_loc,
                                       super_offset,
                                       maybe_temp_loc,
                                       read_barrier_option);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ Cbz(out, &done);
      __ Cmp(out, cls);
      __ B(ne, &loop);
      __ Mov(out, 1);
      if (zero.IsLinked()) {
        __ B(&done);
      }
      break;
    }

    case TypeCheckKind::kClassHierarchyCheck: {
      ReadBarrierOption read_barrier_option =
          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
      // /* HeapReference<Class> */ out = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        out_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp_loc,
                                        read_barrier_option);
      // Walk over the class hierarchy to find a match.
      vixl::aarch64::Label loop, success;
      __ Bind(&loop);
      __ Cmp(out, cls);
      __ B(eq, &success);
      // /* HeapReference<Class> */ out = out->super_class_
      GenerateReferenceLoadOneRegister(instruction,
                                       out_loc,
                                       super_offset,
                                       maybe_temp_loc,
                                       read_barrier_option);
      __ Cbnz(out, &loop);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ B(&done);
      __ Bind(&success);
      __ Mov(out, 1);
      if (zero.IsLinked()) {
        __ B(&done);
      }
      break;
    }

    case TypeCheckKind::kArrayObjectCheck: {
      ReadBarrierOption read_barrier_option =
          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
      // /* HeapReference<Class> */ out = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        out_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp_loc,
                                        read_barrier_option);
      // Do an exact check.
      vixl::aarch64::Label exact_check;
      __ Cmp(out, cls);
      __ B(eq, &exact_check);
      // Otherwise, we need to check that the object's class is a non-primitive array.
      // /* HeapReference<Class> */ out = out->component_type_
      GenerateReferenceLoadOneRegister(instruction,
                                       out_loc,
                                       component_offset,
                                       maybe_temp_loc,
                                       read_barrier_option);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ Cbz(out, &done);
      __ Ldrh(out, HeapOperand(out, primitive_offset));
      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
      __ Cbnz(out, &zero);
      __ Bind(&exact_check);
      __ Mov(out, 1);
      __ B(&done);
      break;
    }

    case TypeCheckKind::kArrayCheck: {
      // No read barrier since the slow path will retry upon failure.
      // /* HeapReference<Class> */ out = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        out_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp_loc,
                                        kWithoutReadBarrier);
      __ Cmp(out, cls);
      DCHECK(locations->OnlyCallsOnSlowPath());
      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
          instruction, /* is_fatal= */ false);
      codegen_->AddSlowPath(slow_path);
      __ B(ne, slow_path->GetEntryLabel());
      __ Mov(out, 1);
      if (zero.IsLinked()) {
        __ B(&done);
      }
      break;
    }

    case TypeCheckKind::kUnresolvedCheck:
    case TypeCheckKind::kInterfaceCheck: {
      // Note that we indeed only call on slow path, but we always go
      // into the slow path for the unresolved and interface check
      // cases.
      //
      // We cannot directly call the InstanceofNonTrivial runtime
      // entry point without resorting to a type checking slow path
      // here (i.e. by calling InvokeRuntime directly), as it would
      // require to assign fixed registers for the inputs of this
      // HInstanceOf instruction (following the runtime calling
      // convention), which might be cluttered by the potential first
      // read barrier emission at the beginning of this method.
      //
      // TODO: Introduce a new runtime entry point taking the object
      // to test (instead of its class) as argument, and let it deal
      // with the read barrier issues. This will let us refactor this
      // case of the `switch` code as it was previously (with a direct
      // call to the runtime not using a type checking slow path).
      // This should also be beneficial for the other cases above.
      DCHECK(locations->OnlyCallsOnSlowPath());
      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
          instruction, /* is_fatal= */ false);
      codegen_->AddSlowPath(slow_path);
      __ B(slow_path->GetEntryLabel());
      if (zero.IsLinked()) {
        __ B(&done);
      }
      break;
    }

    case TypeCheckKind::kBitstringCheck: {
      // /* HeapReference<Class> */ temp = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        out_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp_loc,
                                        kWithoutReadBarrier);

      GenerateBitstringTypeCheckCompare(instruction, out);
      __ Cset(out, eq);
      if (zero.IsLinked()) {
        __ B(&done);
      }
      break;
    }
  }

  if (zero.IsLinked()) {
    __ Bind(&zero);
    __ Mov(out, 0);
  }

  if (done.IsLinked()) {
    __ Bind(&done);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}

// Register constraints for HCheckCast; mirrors VisitInstanceOf but uses
// dedicated temps instead of an output register.
void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
  LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
  // Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64.
  locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
}

// Lowers HCheckCast: falls through when the cast succeeds, otherwise
// branches to a (possibly fatal) type-check slow path that throws.
void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
  LocationSummary* locations = instruction->GetLocations();
  Location obj_loc = locations->InAt(0);
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = (type_check_kind == TypeCheckKind::kBitstringCheck)
      ? Register()
      : InputRegisterAt(instruction, 1);
  const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
  DCHECK_GE(num_temps, 1u);
  DCHECK_LE(num_temps, 3u);
  Location temp_loc = locations->GetTemp(0);
  Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
  Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation();
  Register temp = WRegisterFrom(temp_loc);
  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
  const uint32_t object_array_data_offset =
      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();

  bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
  SlowPathCodeARM64* type_check_slow_path =
      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
          instruction, is_type_check_slow_path_fatal);
  codegen_->AddSlowPath(type_check_slow_path);

  vixl::aarch64::Label done;
  // Avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ Cbz(obj, &done);
  }

  switch (type_check_kind) {
    case TypeCheckKind::kExactCheck:
    case TypeCheckKind::kArrayCheck: {
      // /* HeapReference<Class> */ temp = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        temp_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp2_loc,
                                        kWithoutReadBarrier);

      __ Cmp(temp, cls);
      // Jump to slow path for throwing the exception or doing a
      // more involved array check.
      __ B(ne, type_check_slow_path->GetEntryLabel());
      break;
    }

    case TypeCheckKind::kAbstractClassCheck: {
      // /* HeapReference<Class> */ temp = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        temp_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp2_loc,
                                        kWithoutReadBarrier);

      // If the class is abstract, we eagerly fetch the super class of the
      // object to avoid doing a comparison we know will fail.
      vixl::aarch64::Label loop;
      __ Bind(&loop);
      // /* HeapReference<Class> */ temp = temp->super_class_
      GenerateReferenceLoadOneRegister(instruction,
                                       temp_loc,
                                       super_offset,
                                       maybe_temp2_loc,
                                       kWithoutReadBarrier);

      // If the class reference currently in `temp` is null, jump to the slow path to throw the
      // exception.
      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
      // Otherwise, compare classes.
      __ Cmp(temp, cls);
      __ B(ne, &loop);
      break;
    }

    case TypeCheckKind::kClassHierarchyCheck: {
      // /* HeapReference<Class> */ temp = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        temp_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp2_loc,
                                        kWithoutReadBarrier);

      // Walk over the class hierarchy to find a match.
      vixl::aarch64::Label loop;
      __ Bind(&loop);
      __ Cmp(temp, cls);
      __ B(eq, &done);

      // /* HeapReference<Class> */ temp = temp->super_class_
      GenerateReferenceLoadOneRegister(instruction,
                                       temp_loc,
                                       super_offset,
                                       maybe_temp2_loc,
                                       kWithoutReadBarrier);

      // If the class reference currently in `temp` is not null, jump
      // back at the beginning of the loop.
      __ Cbnz(temp, &loop);
      // Otherwise, jump to the slow path to throw the exception.
      __ B(type_check_slow_path->GetEntryLabel());
      break;
    }

    case TypeCheckKind::kArrayObjectCheck: {
      // /* HeapReference<Class> */ temp = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        temp_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp2_loc,
                                        kWithoutReadBarrier);

      // Do an exact check.
      __ Cmp(temp, cls);
      __ B(eq, &done);

      // Otherwise, we need to check that the object's class is a non-primitive array.
      // /* HeapReference<Class> */ temp = temp->component_type_
      GenerateReferenceLoadOneRegister(instruction,
                                       temp_loc,
                                       component_offset,
                                       maybe_temp2_loc,
                                       kWithoutReadBarrier);

      // If the component type is null, jump to the slow path to throw the exception.
      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
      // Otherwise, the object is indeed an array. Further check that this component type is not a
      // primitive type.
      __ Ldrh(temp, HeapOperand(temp, primitive_offset));
      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
      __ Cbnz(temp, type_check_slow_path->GetEntryLabel());
      break;
    }

    case TypeCheckKind::kUnresolvedCheck:
      // We always go into the type check slow path for the unresolved check cases.
      //
      // We cannot directly call the CheckCast runtime entry point
      // without resorting to a type checking slow path here (i.e. by
      // calling InvokeRuntime directly), as it would require to
      // assign fixed registers for the inputs of this HInstanceOf
      // instruction (following the runtime calling convention), which
      // might be cluttered by the potential first read barrier
      // emission at the beginning of this method.
      __ B(type_check_slow_path->GetEntryLabel());
      break;
    case TypeCheckKind::kInterfaceCheck: {
      // /* HeapReference<Class> */ temp = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        temp_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp2_loc,
                                        kWithoutReadBarrier);

      // /* HeapReference<Class> */ temp = temp->iftable_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        temp_loc,
                                        temp_loc,
                                        iftable_offset,
                                        maybe_temp2_loc,
                                        kWithoutReadBarrier);
      // Iftable is never null.
      __ Ldr(WRegisterFrom(maybe_temp2_loc), HeapOperand(temp.W(), array_length_offset));
      // Loop through the iftable and check if any class matches.
      vixl::aarch64::Label start_loop;
      __ Bind(&start_loop);
      // Remaining count reaching zero means no interface matched: throw.
      __ Cbz(WRegisterFrom(maybe_temp2_loc), type_check_slow_path->GetEntryLabel());
      __ Ldr(WRegisterFrom(maybe_temp3_loc), HeapOperand(temp.W(), object_array_data_offset));
      GetAssembler()->MaybeUnpoisonHeapReference(WRegisterFrom(maybe_temp3_loc));
      // Go to next interface.
      __ Add(temp, temp, 2 * kHeapReferenceSize);
      __ Sub(WRegisterFrom(maybe_temp2_loc), WRegisterFrom(maybe_temp2_loc), 2);
      // Compare the classes and continue the loop if they do not match.
      __ Cmp(cls, WRegisterFrom(maybe_temp3_loc));
      __ B(ne, &start_loop);
      break;
    }

    case TypeCheckKind::kBitstringCheck: {
      // /* HeapReference<Class> */ temp = obj->klass_
      GenerateReferenceLoadTwoRegisters(instruction,
                                        temp_loc,
                                        obj_loc,
                                        class_offset,
                                        maybe_temp2_loc,
                                        kWithoutReadBarrier);

      GenerateBitstringTypeCheckCompare(instruction, temp);
      __ B(ne, type_check_slow_path->GetEntryLabel());
      break;
    }
  }
  __ Bind(&done);

  __ Bind(type_check_slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
  // the method_idx.
+ HandleInvoke(invoke); +} + +void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { + codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) { + InvokeDexCallingConventionVisitorARM64 calling_convention_visitor; + CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor); +} + +void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) { + HandleInvoke(invoke); +} + +void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction, + Register klass) { + DCHECK_EQ(klass.GetCode(), 0u); + // We know the destination of an intrinsic, so no need to record inline + // caches. + if (!instruction->GetLocations()->Intrinsified() && + GetGraph()->IsCompilingBaseline() && + !Runtime::Current()->IsAotCompiler()) { + DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke()); + ScopedObjectAccess soa(Thread::Current()); + ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize); + if (info != nullptr) { + InlineCache* cache = info->GetInlineCache(instruction->GetDexPc()); + uint64_t address = reinterpret_cast64(cache); + vixl::aarch64::Label done; + __ Mov(x8, address); + __ Ldr(x9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value())); + // Fast path for a monomorphic cache. + __ Cmp(klass, x9); + __ B(eq, &done); + InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); + __ Bind(&done); + } + } +} + +void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) { + // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError. 
+ LocationSummary* locations = invoke->GetLocations(); + Register temp = XRegisterFrom(locations->GetTemp(0)); + Location receiver = locations->InAt(0); + Offset class_offset = mirror::Object::ClassOffset(); + Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize); + + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + if (receiver.IsStackSlot()) { + __ Ldr(temp.W(), StackOperandFrom(receiver)); + { + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + // /* HeapReference */ temp = temp->klass_ + __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset)); + codegen_->MaybeRecordImplicitNullCheck(invoke); + } + } else { + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + // /* HeapReference */ temp = receiver->klass_ + __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset)); + codegen_->MaybeRecordImplicitNullCheck(invoke); + } + + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. + // However this is not required in practice, as this is an + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). + GetAssembler()->MaybeUnpoisonHeapReference(temp.W()); + + // If we're compiling baseline, update the inline cache. + codegen_->MaybeGenerateInlineCacheCheck(invoke, temp); + + // The register ip1 is required to be used for the hidden argument in + // art_quick_imt_conflict_trampoline, so prevent VIXL from using it. 
+ MacroAssembler* masm = GetVIXLAssembler(); + UseScratchRegisterScope scratch_scope(masm); + scratch_scope.Exclude(ip1); + __ Mov(ip1, invoke->GetDexMethodIndex()); + + __ Ldr(temp, + MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value())); + uint32_t method_offset = static_cast(ImTable::OffsetOfElement( + invoke->GetImtIndex(), kArm64PointerSize)); + // temp = temp->GetImtEntryAt(method_offset); + __ Ldr(temp, MemOperand(temp, method_offset)); + // lr = temp->GetEntryPoint(); + __ Ldr(lr, MemOperand(temp, entry_point.Int32Value())); + + { + // Ensure the pc position is recorded immediately after the `blr` instruction. + ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize); + + // lr(); + __ blr(lr); + DCHECK(!codegen_->IsLeafMethod()); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + } + + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { + IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_); + if (intrinsic.TryDispatch(invoke)) { + return; + } + + HandleInvoke(invoke); +} + +void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { + // Explicit clinit checks triggered by static invokes must have been pruned by + // art::PrepareForRegisterAllocation. 
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); + + IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_); + if (intrinsic.TryDispatch(invoke)) { + return; + } + + HandleInvoke(invoke); +} + +static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) { + if (invoke->GetLocations()->Intrinsified()) { + IntrinsicCodeGeneratorARM64 intrinsic(codegen); + intrinsic.Dispatch(invoke); + return true; + } + return false; +} + +HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch( + const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, + ArtMethod* method ATTRIBUTE_UNUSED) { + // On ARM64 we support all dispatch types. + return desired_dispatch_info; +} + +void CodeGeneratorARM64::GenerateStaticOrDirectCall( + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) { + // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention. + Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp. + switch (invoke->GetMethodLoadKind()) { + case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: { + uint32_t offset = + GetThreadOffset(invoke->GetStringInitEntryPoint()).Int32Value(); + // temp = thread->string_init_entrypoint + __ Ldr(XRegisterFrom(temp), MemOperand(tr, offset)); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kRecursive: + callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()); + break; + case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: { + DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()); + // Add ADRP with its PC-relative method patch. + vixl::aarch64::Label* adrp_label = NewBootImageMethodPatch(invoke->GetTargetMethod()); + EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp)); + // Add ADD with its PC-relative method patch. 
+ vixl::aarch64::Label* add_label = + NewBootImageMethodPatch(invoke->GetTargetMethod(), adrp_label); + EmitAddPlaceholder(add_label, XRegisterFrom(temp), XRegisterFrom(temp)); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: { + // Add ADRP with its PC-relative .data.bimg.rel.ro patch. + uint32_t boot_image_offset = GetBootImageOffset(invoke); + vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset); + EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp)); + // Add LDR with its PC-relative .data.bimg.rel.ro patch. + vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label); + // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load. + EmitLdrOffsetPlaceholder(ldr_label, WRegisterFrom(temp), XRegisterFrom(temp)); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: { + // Add ADRP with its PC-relative .bss entry patch. + MethodReference target_method(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()); + vixl::aarch64::Label* adrp_label = NewMethodBssEntryPatch(target_method); + EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp)); + // Add LDR with its PC-relative .bss entry patch. + vixl::aarch64::Label* ldr_label = + NewMethodBssEntryPatch(target_method, adrp_label); + // All aligned loads are implicitly atomic consume operations on ARM64. + EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp)); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress: + // Load method address from literal pool. + __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress())); + break; + case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: { + GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path); + return; // No code pointer retrieval; the runtime performs the call directly. 
+ } + } + + switch (invoke->GetCodePtrLocation()) { + case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf: + { + // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc. + ExactAssemblyScope eas(GetVIXLAssembler(), + kInstructionSize, + CodeBufferCheckScope::kExactSize); + __ bl(&frame_entry_label_); + RecordPcInfo(invoke, invoke->GetDexPc(), slow_path); + } + break; + case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod: + // LR = callee_method->entry_point_from_quick_compiled_code_; + __ Ldr(lr, MemOperand( + XRegisterFrom(callee_method), + ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value())); + { + // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc. + ExactAssemblyScope eas(GetVIXLAssembler(), + kInstructionSize, + CodeBufferCheckScope::kExactSize); + // lr() + __ blr(lr); + RecordPcInfo(invoke, invoke->GetDexPc(), slow_path); + } + break; + } + + DCHECK(!IsLeafMethod()); +} + +void CodeGeneratorARM64::GenerateVirtualCall( + HInvokeVirtual* invoke, Location temp_in, SlowPathCode* slow_path) { + // Use the calling convention instead of the location of the receiver, as + // intrinsics may have put the receiver in a different register. In the intrinsics + // slow path, the arguments have been moved to the right place, so here we are + // guaranteed that the receiver is the first register of the calling convention. + InvokeDexCallingConvention calling_convention; + Register receiver = calling_convention.GetRegisterAt(0); + Register temp = XRegisterFrom(temp_in); + size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + invoke->GetVTableIndex(), kArm64PointerSize).SizeValue(); + Offset class_offset = mirror::Object::ClassOffset(); + Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize); + + DCHECK(receiver.IsRegister()); + + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + // /* HeapReference */ temp = receiver->klass_ + __ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset)); + MaybeRecordImplicitNullCheck(invoke); + } + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). + GetAssembler()->MaybeUnpoisonHeapReference(temp.W()); + + // If we're compiling baseline, update the inline cache. + MaybeGenerateInlineCacheCheck(invoke, temp); + + // temp = temp->GetMethodAt(method_offset); + __ Ldr(temp, MemOperand(temp, method_offset)); + // lr = temp->GetEntryPoint(); + __ Ldr(lr, MemOperand(temp, entry_point.SizeValue())); + { + // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc. 
+ ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize); + // lr(); + __ blr(lr); + RecordPcInfo(invoke, invoke->GetDexPc(), slow_path); + } +} + +void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { + HandleInvoke(invoke); +} + +void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { + codegen_->GenerateInvokePolymorphicCall(invoke); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) { + HandleInvoke(invoke); +} + +void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) { + codegen_->GenerateInvokeCustomCall(invoke); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch( + uint32_t intrinsic_data, + vixl::aarch64::Label* adrp_label) { + return NewPcRelativePatch( + /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_other_patches_); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch( + uint32_t boot_image_offset, + vixl::aarch64::Label* adrp_label) { + return NewPcRelativePatch( + /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_other_patches_); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch( + MethodReference target_method, + vixl::aarch64::Label* adrp_label) { + return NewPcRelativePatch( + target_method.dex_file, target_method.index, adrp_label, &boot_image_method_patches_); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewMethodBssEntryPatch( + MethodReference target_method, + vixl::aarch64::Label* adrp_label) { + return NewPcRelativePatch( + target_method.dex_file, target_method.index, adrp_label, &method_bss_entry_patches_); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageTypePatch( + const DexFile& dex_file, + dex::TypeIndex type_index, + vixl::aarch64::Label* 
adrp_label) { + return NewPcRelativePatch(&dex_file, type_index.index_, adrp_label, &boot_image_type_patches_); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewBssEntryTypePatch( + const DexFile& dex_file, + dex::TypeIndex type_index, + vixl::aarch64::Label* adrp_label) { + return NewPcRelativePatch(&dex_file, type_index.index_, adrp_label, &type_bss_entry_patches_); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageStringPatch( + const DexFile& dex_file, + dex::StringIndex string_index, + vixl::aarch64::Label* adrp_label) { + return NewPcRelativePatch( + &dex_file, string_index.index_, adrp_label, &boot_image_string_patches_); +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewStringBssEntryPatch( + const DexFile& dex_file, + dex::StringIndex string_index, + vixl::aarch64::Label* adrp_label) { + return NewPcRelativePatch(&dex_file, string_index.index_, adrp_label, &string_bss_entry_patches_); +} + +void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset) { + DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope. + DCHECK(!Runtime::Current()->UseJitCompilation()); + call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value()); + vixl::aarch64::Label* bl_label = &call_entrypoint_patches_.back().label; + __ bind(bl_label); + __ bl(static_cast(0)); // Placeholder, patched at link-time. +} + +void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) { + DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope. + if (Runtime::Current()->UseJitCompilation()) { + auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data); + vixl::aarch64::Label* slow_path_entry = &it->second.label; + __ cbnz(mr, slow_path_entry); + } else { + baker_read_barrier_patches_.emplace_back(custom_data); + vixl::aarch64::Label* cbnz_label = &baker_read_barrier_patches_.back().label; + __ bind(cbnz_label); + __ cbnz(mr, static_cast(0)); // Placeholder, patched at link-time. 
+ } +} + +vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch( + const DexFile* dex_file, + uint32_t offset_or_index, + vixl::aarch64::Label* adrp_label, + ArenaDeque* patches) { + // Add a patch entry and return the label. + patches->emplace_back(dex_file, offset_or_index); + PcRelativePatchInfo* info = &patches->back(); + vixl::aarch64::Label* label = &info->label; + // If adrp_label is null, this is the ADRP patch and needs to point to its own label. + info->pc_insn_label = (adrp_label != nullptr) ? adrp_label : label; + return label; +} + +vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral( + uint64_t address) { + return DeduplicateUint32Literal(dchecked_integral_cast(address)); +} + +vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateJitStringLiteral( + const DexFile& dex_file, dex::StringIndex string_index, Handle handle) { + ReserveJitStringRoot(StringReference(&dex_file, string_index), handle); + return jit_string_patches_.GetOrCreate( + StringReference(&dex_file, string_index), + [this]() { return __ CreateLiteralDestroyedWithPool(/* value= */ 0u); }); +} + +vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateJitClassLiteral( + const DexFile& dex_file, dex::TypeIndex type_index, Handle handle) { + ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle); + return jit_class_patches_.GetOrCreate( + TypeReference(&dex_file, type_index), + [this]() { return __ CreateLiteralDestroyedWithPool(/* value= */ 0u); }); +} + +void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, + vixl::aarch64::Register reg) { + DCHECK(reg.IsX()); + SingleEmissionCheckScope guard(GetVIXLAssembler()); + __ Bind(fixup_label); + __ adrp(reg, /* offset placeholder */ static_cast(0)); +} + +void CodeGeneratorARM64::EmitAddPlaceholder(vixl::aarch64::Label* fixup_label, + vixl::aarch64::Register out, + vixl::aarch64::Register base) { + DCHECK(out.IsX()); + DCHECK(base.IsX()); + SingleEmissionCheckScope 
guard(GetVIXLAssembler()); + __ Bind(fixup_label); + __ add(out, base, Operand(/* offset placeholder */ 0)); +} + +void CodeGeneratorARM64::EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_label, + vixl::aarch64::Register out, + vixl::aarch64::Register base) { + DCHECK(base.IsX()); + SingleEmissionCheckScope guard(GetVIXLAssembler()); + __ Bind(fixup_label); + __ ldr(out, MemOperand(base, /* offset placeholder */ 0)); +} + +void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg, + uint32_t boot_image_reference) { + if (GetCompilerOptions().IsBootImage()) { + // Add ADRP with its PC-relative type patch. + vixl::aarch64::Label* adrp_label = NewBootImageIntrinsicPatch(boot_image_reference); + EmitAdrpPlaceholder(adrp_label, reg.X()); + // Add ADD with its PC-relative type patch. + vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label); + EmitAddPlaceholder(add_label, reg.X(), reg.X()); + } else if (GetCompilerOptions().GetCompilePic()) { + // Add ADRP with its PC-relative .data.bimg.rel.ro patch. + vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference); + EmitAdrpPlaceholder(adrp_label, reg.X()); + // Add LDR with its PC-relative .data.bimg.rel.ro patch. 
+ vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_reference, adrp_label); + EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X()); + } else { + DCHECK(Runtime::Current()->UseJitCompilation()); + gc::Heap* heap = Runtime::Current()->GetHeap(); + DCHECK(!heap->GetBootImageSpaces().empty()); + const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference; + __ Ldr(reg.W(), DeduplicateBootImageAddressLiteral(reinterpret_cast(address))); + } +} + +void CodeGeneratorARM64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, + uint32_t boot_image_offset) { + DCHECK(invoke->IsStatic()); + InvokeRuntimeCallingConvention calling_convention; + Register argument = calling_convention.GetRegisterAt(0); + if (GetCompilerOptions().IsBootImage()) { + DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference); + // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative. + MethodReference target_method = invoke->GetTargetMethod(); + dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_; + // Add ADRP with its PC-relative type patch. + vixl::aarch64::Label* adrp_label = NewBootImageTypePatch(*target_method.dex_file, type_idx); + EmitAdrpPlaceholder(adrp_label, argument.X()); + // Add ADD with its PC-relative type patch. 
+ vixl::aarch64::Label* add_label = + NewBootImageTypePatch(*target_method.dex_file, type_idx, adrp_label); + EmitAddPlaceholder(add_label, argument.X(), argument.X()); + } else { + LoadBootImageAddress(argument, boot_image_offset); + } + InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes(); +} + +template +inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches( + const ArenaDeque& infos, + ArenaVector* linker_patches) { + for (const PcRelativePatchInfo& info : infos) { + linker_patches->push_back(Factory(info.label.GetLocation(), + info.target_dex_file, + info.pc_insn_label->GetLocation(), + info.offset_or_index)); + } +} + +template +linker::LinkerPatch NoDexFileAdapter(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t boot_image_offset) { + DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null. + return Factory(literal_offset, pc_insn_offset, boot_image_offset); +} + +void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector* linker_patches) { + DCHECK(linker_patches->empty()); + size_t size = + boot_image_method_patches_.size() + + method_bss_entry_patches_.size() + + boot_image_type_patches_.size() + + type_bss_entry_patches_.size() + + boot_image_string_patches_.size() + + string_bss_entry_patches_.size() + + boot_image_other_patches_.size() + + call_entrypoint_patches_.size() + + baker_read_barrier_patches_.size(); + linker_patches->reserve(size); + if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) { + EmitPcRelativeLinkerPatches( + boot_image_method_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + boot_image_type_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + boot_image_string_patches_, linker_patches); + } else { + DCHECK(boot_image_method_patches_.empty()); + DCHECK(boot_image_type_patches_.empty()); + DCHECK(boot_image_string_patches_.empty()); + } + if 
(GetCompilerOptions().IsBootImage()) { + EmitPcRelativeLinkerPatches>( + boot_image_other_patches_, linker_patches); + } else { + EmitPcRelativeLinkerPatches>( + boot_image_other_patches_, linker_patches); + } + EmitPcRelativeLinkerPatches( + method_bss_entry_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + type_bss_entry_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + string_bss_entry_patches_, linker_patches); + for (const PatchInfo& info : call_entrypoint_patches_) { + DCHECK(info.target_dex_file == nullptr); + linker_patches->push_back(linker::LinkerPatch::CallEntrypointPatch( + info.label.GetLocation(), info.offset_or_index)); + } + for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) { + linker_patches->push_back(linker::LinkerPatch::BakerReadBarrierBranchPatch( + info.label.GetLocation(), info.custom_data)); + } + DCHECK_EQ(size, linker_patches->size()); +} + +bool CodeGeneratorARM64::NeedsThunkCode(const linker::LinkerPatch& patch) const { + return patch.GetType() == linker::LinkerPatch::Type::kCallEntrypoint || + patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch || + patch.GetType() == linker::LinkerPatch::Type::kCallRelative; +} + +void CodeGeneratorARM64::EmitThunkCode(const linker::LinkerPatch& patch, + /*out*/ ArenaVector* code, + /*out*/ std::string* debug_name) { + Arm64Assembler assembler(GetGraph()->GetAllocator()); + switch (patch.GetType()) { + case linker::LinkerPatch::Type::kCallRelative: { + // The thunk just uses the entry point in the ArtMethod. This works even for calls + // to the generic JNI and interpreter trampolines. 
+ Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kArm64PointerSize).Int32Value()); + assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0)); + if (GetCompilerOptions().GenerateAnyDebugInfo()) { + *debug_name = "MethodCallThunk"; + } + break; + } + case linker::LinkerPatch::Type::kCallEntrypoint: { + Offset offset(patch.EntrypointOffset()); + assembler.JumpTo(ManagedRegister(arm64::TR), offset, ManagedRegister(arm64::IP0)); + if (GetCompilerOptions().GenerateAnyDebugInfo()) { + *debug_name = "EntrypointCallThunk_" + std::to_string(offset.Uint32Value()); + } + break; + } + case linker::LinkerPatch::Type::kBakerReadBarrierBranch: { + DCHECK_EQ(patch.GetBakerCustomValue2(), 0u); + CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name); + break; + } + default: + LOG(FATAL) << "Unexpected patch type " << patch.GetType(); + UNREACHABLE(); + } + + // Ensure we emit the literal pool if any. + assembler.FinalizeCode(); + code->resize(assembler.CodeSize()); + MemoryRegion code_region(code->data(), code->size()); + assembler.FinalizeInstructions(code_region); +} + +vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value) { + return uint32_literals_.GetOrCreate( + value, + [this, value]() { return __ CreateLiteralDestroyedWithPool(value); }); +} + +vixl::aarch64::Literal* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) { + return uint64_literals_.GetOrCreate( + value, + [this, value]() { return __ CreateLiteralDestroyedWithPool(value); }); +} + +void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { + // Explicit clinit checks triggered by static invokes must have been pruned by + // art::PrepareForRegisterAllocation. 
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); + + if (TryGenerateIntrinsicCode(invoke, codegen_)) { + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); + return; + } + + { + // Ensure that between the BLR (emitted by GenerateStaticOrDirectCall) and RecordPcInfo there + // are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes); + LocationSummary* locations = invoke->GetLocations(); + codegen_->GenerateStaticOrDirectCall( + invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation()); + } + + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { + if (TryGenerateIntrinsicCode(invoke, codegen_)) { + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); + return; + } + + { + // Ensure that between the BLR (emitted by GenerateVirtualCall) and RecordPcInfo there + // are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kInvokeCodeMarginSizeInBytes); + codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0)); + DCHECK(!codegen_->IsLeafMethod()); + } + + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind( + HLoadClass::LoadKind desired_class_load_kind) { + switch (desired_class_load_kind) { + case HLoadClass::LoadKind::kInvalid: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + case HLoadClass::LoadKind::kReferrersClass: + break; + case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: + case HLoadClass::LoadKind::kBootImageRelRo: + case HLoadClass::LoadKind::kBssEntry: + DCHECK(!Runtime::Current()->UseJitCompilation()); + break; + case HLoadClass::LoadKind::kJitBootImageAddress: + case HLoadClass::LoadKind::kJitTableAddress: + DCHECK(Runtime::Current()->UseJitCompilation()); + break; + case HLoadClass::LoadKind::kRuntimeCall: + break; + } + return 
desired_class_load_kind; +} + +void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { + HLoadClass::LoadKind load_kind = cls->GetLoadKind(); + if (load_kind == HLoadClass::LoadKind::kRuntimeCall) { + InvokeRuntimeCallingConvention calling_convention; + CodeGenerator::CreateLoadClassRuntimeCallLocationSummary( + cls, + LocationFrom(calling_convention.GetRegisterAt(0)), + LocationFrom(vixl::aarch64::x0)); + DCHECK(calling_convention.GetRegisterAt(0).Is(vixl::aarch64::x0)); + return; + } + DCHECK(!cls->NeedsAccessCheck()); + + const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall; + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); + if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. + } + + if (load_kind == HLoadClass::LoadKind::kReferrersClass) { + locations->SetInAt(0, Location::RequiresRegister()); + } + locations->SetOut(Location::RequiresRegister()); + if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) { + if (!kUseReadBarrier || kUseBakerReadBarrier) { + // Rely on the type resolution or initialization and marking to save everything we need. + locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); + } else { + // For non-Baker read barrier we have a temp-clobbering call. + } + } +} + +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. 
+void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS { + HLoadClass::LoadKind load_kind = cls->GetLoadKind(); + if (load_kind == HLoadClass::LoadKind::kRuntimeCall) { + codegen_->GenerateLoadClassRuntimeCall(cls); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); + return; + } + DCHECK(!cls->NeedsAccessCheck()); + + Location out_loc = cls->GetLocations()->Out(); + Register out = OutputRegister(cls); + + const ReadBarrierOption read_barrier_option = cls->IsInBootImage() + ? kWithoutReadBarrier + : kCompilerReadBarrierOption; + bool generate_null_check = false; + switch (load_kind) { + case HLoadClass::LoadKind::kReferrersClass: { + DCHECK(!cls->CanCallRuntime()); + DCHECK(!cls->MustGenerateClinitCheck()); + // /* GcRoot */ out = current_method->declaring_class_ + Register current_method = InputRegisterAt(cls, 0); + codegen_->GenerateGcRootFieldLoad(cls, + out_loc, + current_method, + ArtMethod::DeclaringClassOffset().Int32Value(), + /* fixup_label= */ nullptr, + read_barrier_option); + break; + } + case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: { + DCHECK(codegen_->GetCompilerOptions().IsBootImage() || + codegen_->GetCompilerOptions().IsBootImageExtension()); + DCHECK_EQ(read_barrier_option, kWithoutReadBarrier); + // Add ADRP with its PC-relative type patch. + const DexFile& dex_file = cls->GetDexFile(); + dex::TypeIndex type_index = cls->GetTypeIndex(); + vixl::aarch64::Label* adrp_label = codegen_->NewBootImageTypePatch(dex_file, type_index); + codegen_->EmitAdrpPlaceholder(adrp_label, out.X()); + // Add ADD with its PC-relative type patch. 
+ vixl::aarch64::Label* add_label = + codegen_->NewBootImageTypePatch(dex_file, type_index, adrp_label); + codegen_->EmitAddPlaceholder(add_label, out.X(), out.X()); + break; + } + case HLoadClass::LoadKind::kBootImageRelRo: { + DCHECK(!codegen_->GetCompilerOptions().IsBootImage()); + uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls); + // Add ADRP with its PC-relative .data.bimg.rel.ro patch. + vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset); + codegen_->EmitAdrpPlaceholder(adrp_label, out.X()); + // Add LDR with its PC-relative .data.bimg.rel.ro patch. + vixl::aarch64::Label* ldr_label = + codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label); + codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X()); + break; + } + case HLoadClass::LoadKind::kBssEntry: { + // Add ADRP with its PC-relative Class .bss entry patch. + const DexFile& dex_file = cls->GetDexFile(); + dex::TypeIndex type_index = cls->GetTypeIndex(); + vixl::aarch64::Register temp = XRegisterFrom(out_loc); + vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index); + codegen_->EmitAdrpPlaceholder(adrp_label, temp); + // Add LDR with its PC-relative Class .bss entry patch. + vixl::aarch64::Label* ldr_label = + codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label); + // /* GcRoot */ out = *(base_address + offset) /* PC-relative */ + // All aligned loads are implicitly atomic consume operations on ARM64. 
+ codegen_->GenerateGcRootFieldLoad(cls, + out_loc, + temp, + /* offset placeholder */ 0u, + ldr_label, + read_barrier_option); + generate_null_check = true; + break; + } + case HLoadClass::LoadKind::kJitBootImageAddress: { + DCHECK_EQ(read_barrier_option, kWithoutReadBarrier); + uint32_t address = reinterpret_cast32(cls->GetClass().Get()); + DCHECK_NE(address, 0u); + __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address)); + break; + } + case HLoadClass::LoadKind::kJitTableAddress: { + __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(), + cls->GetTypeIndex(), + cls->GetClass())); + codegen_->GenerateGcRootFieldLoad(cls, + out_loc, + out.X(), + /* offset= */ 0, + /* fixup_label= */ nullptr, + read_barrier_option); + break; + } + case HLoadClass::LoadKind::kRuntimeCall: + case HLoadClass::LoadKind::kInvalid: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } + + bool do_clinit = cls->MustGenerateClinitCheck(); + if (generate_null_check || do_clinit) { + DCHECK(cls->CanCallRuntime()); + SlowPathCodeARM64* slow_path = + new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(cls, cls); + codegen_->AddSlowPath(slow_path); + if (generate_null_check) { + __ Cbz(out, slow_path->GetEntryLabel()); + } + if (cls->MustGenerateClinitCheck()) { + GenerateClassInitializationCheck(slow_path, out); + } else { + __ Bind(slow_path->GetExitLabel()); + } + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); + } +} + +void LocationsBuilderARM64::VisitLoadMethodHandle(HLoadMethodHandle* load) { + InvokeRuntimeCallingConvention calling_convention; + Location location = LocationFrom(calling_convention.GetRegisterAt(0)); + CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, location, location); +} + +void InstructionCodeGeneratorARM64::VisitLoadMethodHandle(HLoadMethodHandle* load) { + codegen_->GenerateLoadMethodHandleRuntimeCall(load); +} + +void LocationsBuilderARM64::VisitLoadMethodType(HLoadMethodType* load) { + 
InvokeRuntimeCallingConvention calling_convention; + Location location = LocationFrom(calling_convention.GetRegisterAt(0)); + CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(load, location, location); +} + +void InstructionCodeGeneratorARM64::VisitLoadMethodType(HLoadMethodType* load) { + codegen_->GenerateLoadMethodTypeRuntimeCall(load); +} + +static MemOperand GetExceptionTlsAddress() { + return MemOperand(tr, Thread::ExceptionOffset().Int32Value()); +} + +void LocationsBuilderARM64::VisitLoadException(HLoadException* load) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) { + __ Ldr(OutputRegister(instruction), GetExceptionTlsAddress()); +} + +void LocationsBuilderARM64::VisitClearException(HClearException* clear) { + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); +} + +void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { + __ Str(wzr, GetExceptionTlsAddress()); +} + +HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind( + HLoadString::LoadKind desired_string_load_kind) { + switch (desired_string_load_kind) { + case HLoadString::LoadKind::kBootImageLinkTimePcRelative: + case HLoadString::LoadKind::kBootImageRelRo: + case HLoadString::LoadKind::kBssEntry: + DCHECK(!Runtime::Current()->UseJitCompilation()); + break; + case HLoadString::LoadKind::kJitBootImageAddress: + case HLoadString::LoadKind::kJitTableAddress: + DCHECK(Runtime::Current()->UseJitCompilation()); + break; + case HLoadString::LoadKind::kRuntimeCall: + break; + } + return desired_string_load_kind; +} + +void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { + LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load); + LocationSummary* locations = new 
(GetGraph()->GetAllocator()) LocationSummary(load, call_kind); + if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) { + InvokeRuntimeCallingConvention calling_convention; + locations->SetOut(calling_convention.GetReturnLocation(load->GetType())); + } else { + locations->SetOut(Location::RequiresRegister()); + if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) { + if (!kUseReadBarrier || kUseBakerReadBarrier) { + // Rely on the pResolveString and marking to save everything we need. + locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); + } else { + // For non-Baker read barrier we have a temp-clobbering call. + } + } + } +} + +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { + Register out = OutputRegister(load); + Location out_loc = load->GetLocations()->Out(); + + switch (load->GetLoadKind()) { + case HLoadString::LoadKind::kBootImageLinkTimePcRelative: { + DCHECK(codegen_->GetCompilerOptions().IsBootImage() || + codegen_->GetCompilerOptions().IsBootImageExtension()); + // Add ADRP with its PC-relative String patch. + const DexFile& dex_file = load->GetDexFile(); + const dex::StringIndex string_index = load->GetStringIndex(); + vixl::aarch64::Label* adrp_label = codegen_->NewBootImageStringPatch(dex_file, string_index); + codegen_->EmitAdrpPlaceholder(adrp_label, out.X()); + // Add ADD with its PC-relative String patch. + vixl::aarch64::Label* add_label = + codegen_->NewBootImageStringPatch(dex_file, string_index, adrp_label); + codegen_->EmitAddPlaceholder(add_label, out.X(), out.X()); + return; + } + case HLoadString::LoadKind::kBootImageRelRo: { + DCHECK(!codegen_->GetCompilerOptions().IsBootImage()); + // Add ADRP with its PC-relative .data.bimg.rel.ro patch. 
+ uint32_t boot_image_offset = codegen_->GetBootImageOffset(load); + vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset); + codegen_->EmitAdrpPlaceholder(adrp_label, out.X()); + // Add LDR with its PC-relative .data.bimg.rel.ro patch. + vixl::aarch64::Label* ldr_label = + codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label); + codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X()); + return; + } + case HLoadString::LoadKind::kBssEntry: { + // Add ADRP with its PC-relative String .bss entry patch. + const DexFile& dex_file = load->GetDexFile(); + const dex::StringIndex string_index = load->GetStringIndex(); + Register temp = XRegisterFrom(out_loc); + vixl::aarch64::Label* adrp_label = codegen_->NewStringBssEntryPatch(dex_file, string_index); + codegen_->EmitAdrpPlaceholder(adrp_label, temp); + // Add LDR with its PC-relative String .bss entry patch. + vixl::aarch64::Label* ldr_label = + codegen_->NewStringBssEntryPatch(dex_file, string_index, adrp_label); + // /* GcRoot */ out = *(base_address + offset) /* PC-relative */ + // All aligned loads are implicitly atomic consume operations on ARM64. 
+ codegen_->GenerateGcRootFieldLoad(load, + out_loc, + temp, + /* offset placeholder */ 0u, + ldr_label, + kCompilerReadBarrierOption); + SlowPathCodeARM64* slow_path = + new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load); + codegen_->AddSlowPath(slow_path); + __ Cbz(out.X(), slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); + return; + } + case HLoadString::LoadKind::kJitBootImageAddress: { + uint32_t address = reinterpret_cast32(load->GetString().Get()); + DCHECK_NE(address, 0u); + __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address)); + return; + } + case HLoadString::LoadKind::kJitTableAddress: { + __ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(), + load->GetStringIndex(), + load->GetString())); + codegen_->GenerateGcRootFieldLoad(load, + out_loc, + out.X(), + /* offset= */ 0, + /* fixup_label= */ nullptr, + kCompilerReadBarrierOption); + return; + } + default: + break; + } + + // TODO: Re-add the compiler code to do string dex cache lookup again. + InvokeRuntimeCallingConvention calling_convention; + DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), out.GetCode()); + __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_); + codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc()); + CheckEntrypointTypes(); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant); + locations->SetOut(Location::ConstantLocation(constant)); +} + +void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) { + // Will be generated at use site. 
+} + +void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); +} + +void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) { + codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject, + instruction, + instruction->GetDexPc()); + if (instruction->IsEnter()) { + CheckEntrypointTypes(); + } else { + CheckEntrypointTypes(); + } + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::VisitMul(HMul* mul) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); + switch (mul->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + + default: + LOG(FATAL) << "Unexpected mul type " << mul->GetResultType(); + } +} + +void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) { + switch (mul->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1)); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1)); + break; + + default: + 
LOG(FATAL) << "Unexpected mul type " << mul->GetResultType(); + } +} + +void LocationsBuilderARM64::VisitNeg(HNeg* neg) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); + switch (neg->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg)); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + + default: + LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); + } +} + +void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) { + switch (neg->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + __ Neg(OutputRegister(neg), InputOperandAt(neg, 0)); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0)); + break; + + default: + LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); + } +} + +void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); + InvokeRuntimeCallingConvention calling_convention; + locations->SetOut(LocationFrom(x0)); + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); +} + +void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) { + // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference. 
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction); + codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc()); + CheckEntrypointTypes(); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); + locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference)); +} + +void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) { + codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); + CheckEntrypointTypes(); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::VisitNot(HNot* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) { + switch (instruction->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0)); + break; + + default: + LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType(); + } +} + +void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* 
instruction) { + __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::aarch64::Operand(1)); +} + +void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) { + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); + locations->SetInAt(0, Location::RequiresRegister()); +} + +void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) { + if (CanMoveNullCheckToUser(instruction)) { + return; + } + { + // Ensure that between load and RecordPcInfo there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + Location obj = instruction->GetLocations()->InAt(0); + __ Ldr(wzr, HeapOperandFrom(obj, Offset(0))); + RecordPcInfo(instruction, instruction->GetDexPc()); + } +} + +void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) { + SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) NullCheckSlowPathARM64(instruction); + AddSlowPath(slow_path); + + LocationSummary* locations = instruction->GetLocations(); + Location obj = locations->InAt(0); + + __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel()); +} + +void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) { + codegen_->GenerateNullCheck(instruction); +} + +void LocationsBuilderARM64::VisitOr(HOr* instruction) { + HandleBinaryOp(instruction); +} + +void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) { + HandleBinaryOp(instruction); +} + +void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) { + LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) { + if (instruction->GetNext()->IsSuspendCheck() && + instruction->GetBlock()->GetLoopInformation() != nullptr) { + HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck(); + // The back edge will generate the suspend check. 
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction); + } + + codegen_->GetMoveResolver()->EmitNativeCode(instruction); +} + +void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); + if (location.IsStackSlot()) { + location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); + } else if (location.IsDoubleStackSlot()) { + location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); + } + locations->SetOut(location); +} + +void InstructionCodeGeneratorARM64::VisitParameterValue( + HParameterValue* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, the parameter is already at its location. +} + +void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(LocationFrom(kArtMethodRegister)); +} + +void InstructionCodeGeneratorARM64::VisitCurrentMethod( + HCurrentMethod* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, the method is already at its location. +} + +void LocationsBuilderARM64::VisitPhi(HPhi* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { + locations->SetInAt(i, Location::Any()); + } + locations->SetOut(Location::Any()); +} + +void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) { + LOG(FATAL) << "Unreachable"; +} + +void LocationsBuilderARM64::VisitRem(HRem* rem) { + DataType::Type type = rem->GetResultType(); + LocationSummary::CallKind call_kind = + DataType::IsFloatingPointType(type) ? 
LocationSummary::kCallOnMainOnly + : LocationSummary::kNoCall; + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind); + + switch (type) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); + locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1))); + locations->SetOut(calling_convention.GetReturnLocation(type)); + + break; + } + + default: + LOG(FATAL) << "Unexpected rem type " << type; + } +} + +void InstructionCodeGeneratorARM64::GenerateIntRemForPower2Denom(HRem *instruction) { + int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1)); + uint64_t abs_imm = static_cast(AbsOrMin(imm)); + DCHECK(IsPowerOfTwo(abs_imm)) << abs_imm; + + Register out = OutputRegister(instruction); + Register dividend = InputRegisterAt(instruction, 0); + + if (abs_imm == 2) { + __ Cmp(dividend, 0); + __ And(out, dividend, 1); + __ Csneg(out, out, out, ge); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register temp = temps.AcquireSameSizeAs(out); + + __ Negs(temp, dividend); + __ And(out, dividend, abs_imm - 1); + __ And(temp, temp, abs_imm - 1); + __ Csneg(out, out, temp, mi); + } +} + +void InstructionCodeGeneratorARM64::GenerateIntRemForConstDenom(HRem *instruction) { + int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1)); + + if (imm == 0) { + // Do not generate anything. + // DivZeroCheck would prevent any code to be executed. 
+ return; + } + + if (IsPowerOfTwo(AbsOrMin(imm))) { + // Cases imm == -1 or imm == 1 are handled in constant folding by + // InstructionWithAbsorbingInputSimplifier. + // If the cases have survided till code generation they are handled in + // GenerateIntRemForPower2Denom becauses -1 and 1 are the power of 2 (2^0). + // The correct code is generated for them, just more instructions. + GenerateIntRemForPower2Denom(instruction); + } else { + DCHECK(imm < -2 || imm > 2) << imm; + GenerateDivRemWithAnyConstant(instruction); + } +} + +void InstructionCodeGeneratorARM64::GenerateIntRem(HRem* instruction) { + DCHECK(DataType::IsIntOrLongType(instruction->GetResultType())) + << instruction->GetResultType(); + + if (instruction->GetLocations()->InAt(1).IsConstant()) { + GenerateIntRemForConstDenom(instruction); + } else { + Register out = OutputRegister(instruction); + Register dividend = InputRegisterAt(instruction, 0); + Register divisor = InputRegisterAt(instruction, 1); + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register temp = temps.AcquireSameSizeAs(out); + __ Sdiv(temp, dividend, divisor); + __ Msub(out, temp, divisor, dividend); + } +} + +void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) { + DataType::Type type = rem->GetResultType(); + + switch (type) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + GenerateIntRem(rem); + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + QuickEntrypointEnum entrypoint = + (type == DataType::Type::kFloat32) ? 
kQuickFmodf : kQuickFmod; + codegen_->InvokeRuntime(entrypoint, rem, rem->GetDexPc()); + if (type == DataType::Type::kFloat32) { + CheckEntrypointTypes(); + } else { + CheckEntrypointTypes(); + } + break; + } + + default: + LOG(FATAL) << "Unexpected rem type " << type; + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitMin(HMin* min) { + HandleBinaryOp(min); +} + +void InstructionCodeGeneratorARM64::VisitMin(HMin* min) { + HandleBinaryOp(min); +} + +void LocationsBuilderARM64::VisitMax(HMax* max) { + HandleBinaryOp(max); +} + +void InstructionCodeGeneratorARM64::VisitMax(HMax* max) { + HandleBinaryOp(max); +} + +void LocationsBuilderARM64::VisitAbs(HAbs* abs) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs); + switch (abs->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unexpected type for abs operation " << abs->GetResultType(); + } +} + +void InstructionCodeGeneratorARM64::VisitAbs(HAbs* abs) { + switch (abs->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + Register in_reg = InputRegisterAt(abs, 0); + Register out_reg = OutputRegister(abs); + __ Cmp(in_reg, Operand(0)); + __ Cneg(out_reg, in_reg, lt); + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + VRegister in_reg = InputFPRegisterAt(abs, 0); + VRegister out_reg = OutputFPRegister(abs); + __ Fabs(out_reg, in_reg); + break; + } + default: + LOG(FATAL) << "Unexpected type for abs operation " << abs->GetResultType(); + } +} + +void 
LocationsBuilderARM64::VisitConstructorFence(HConstructorFence* constructor_fence) { + constructor_fence->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARM64::VisitConstructorFence( + HConstructorFence* constructor_fence ATTRIBUTE_UNUSED) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); +} + +void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { + memory_barrier->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { + codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind()); +} + +void LocationsBuilderARM64::VisitReturn(HReturn* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + DataType::Type return_type = instruction->InputAt(0)->GetType(); + locations->SetInAt(0, ARM64ReturnLocation(return_type)); +} + +void InstructionCodeGeneratorARM64::VisitReturn(HReturn* ret) { + if (GetGraph()->IsCompilingOsr()) { + // To simplify callers of an OSR method, we put the return value in both + // floating point and core register. 
+ switch (ret->InputAt(0)->GetType()) { + case DataType::Type::kFloat32: + __ Fmov(w0, s0); + break; + case DataType::Type::kFloat64: + __ Fmov(x0, d0); + break; + default: + break; + } + } + codegen_->GenerateFrameExit(); +} + +void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) { + instruction->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) { + codegen_->GenerateFrameExit(); +} + +void LocationsBuilderARM64::VisitRor(HRor* ror) { + HandleBinaryOp(ror); +} + +void InstructionCodeGeneratorARM64::VisitRor(HRor* ror) { + HandleBinaryOp(ror); +} + +void LocationsBuilderARM64::VisitShl(HShl* shl) { + HandleShift(shl); +} + +void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) { + HandleShift(shl); +} + +void LocationsBuilderARM64::VisitShr(HShr* shr) { + HandleShift(shr); +} + +void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) { + HandleShift(shr); +} + +void LocationsBuilderARM64::VisitSub(HSub* instruction) { + HandleBinaryOp(instruction); +} + +void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) { + HandleBinaryOp(instruction); +} + +void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) { + HandleFieldGet(instruction, instruction->GetFieldInfo()); +} + +void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) { + HandleFieldGet(instruction, instruction->GetFieldInfo()); +} + +void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) { + HandleFieldSet(instruction); +} + +void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) { + HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull()); +} + +void LocationsBuilderARM64::VisitStringBuilderAppend(HStringBuilderAppend* instruction) { + codegen_->CreateStringBuilderAppendLocations(instruction, LocationFrom(x0)); +} + +void 
InstructionCodeGeneratorARM64::VisitStringBuilderAppend(HStringBuilderAppend* instruction) { + __ Mov(w0, instruction->GetFormat()->GetValue()); + codegen_->InvokeRuntime(kQuickStringBuilderAppend, instruction, instruction->GetDexPc()); +} + +void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet( + HUnresolvedInstanceFieldGet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), calling_convention); +} + +void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet( + HUnresolvedInstanceFieldGet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet( + HUnresolvedInstanceFieldSet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), calling_convention); +} + +void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet( + HUnresolvedInstanceFieldSet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet( + HUnresolvedStaticFieldGet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), calling_convention); +} + +void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet( + HUnresolvedStaticFieldGet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + 
codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet( + HUnresolvedStaticFieldSet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), calling_convention); +} + +void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet( + HUnresolvedStaticFieldSet* instruction) { + FieldAccessCallingConventionARM64 calling_convention; + codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); + // In suspend check slow path, usually there are no caller-save registers at all. + // If SIMD instructions are present, however, we force spilling all live SIMD + // registers in full width (since the runtime only saves/restores lower part). + locations->SetCustomSlowPathCallerSaves( + GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty()); +} + +void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) { + HBasicBlock* block = instruction->GetBlock(); + if (block->GetLoopInformation() != nullptr) { + DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction); + // The back edge will generate the suspend check. + return; + } + if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) { + // The goto will generate the suspend check. 
+ return; + } + GenerateSuspendCheck(instruction, nullptr); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void LocationsBuilderARM64::VisitThrow(HThrow* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); +} + +void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) { + codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc()); + CheckEntrypointTypes(); +} + +void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall); + DataType::Type input_type = conversion->GetInputType(); + DataType::Type result_type = conversion->GetResultType(); + DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type)) + << input_type << " -> " << result_type; + if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) || + (result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) { + LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; + } + + if (DataType::IsFloatingPointType(input_type)) { + locations->SetInAt(0, Location::RequiresFpuRegister()); + } else { + locations->SetInAt(0, Location::RequiresRegister()); + } + + if (DataType::IsFloatingPointType(result_type)) { + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + } else { + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } +} + +void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) { + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); + + 
DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type)) + << input_type << " -> " << result_type; + + if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) { + int result_size = DataType::Size(result_type); + int input_size = DataType::Size(input_type); + int min_size = std::min(result_size, input_size); + Register output = OutputRegister(conversion); + Register source = InputRegisterAt(conversion, 0); + if (result_type == DataType::Type::kInt32 && input_type == DataType::Type::kInt64) { + // 'int' values are used directly as W registers, discarding the top + // bits, so we don't need to sign-extend and can just perform a move. + // We do not pass the `kDiscardForSameWReg` argument to force clearing the + // top 32 bits of the target register. We theoretically could leave those + // bits unchanged, but we would have to make sure that no code uses a + // 32bit input value as a 64bit value assuming that the top 32 bits are + // zero. + __ Mov(output.W(), source.W()); + } else if (DataType::IsUnsignedType(result_type) || + (DataType::IsUnsignedType(input_type) && input_size < result_size)) { + __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, result_size * kBitsPerByte); + } else { + __ Sbfx(output, output.IsX() ? 
source.X() : source.W(), 0, min_size * kBitsPerByte); + } + } else if (DataType::IsFloatingPointType(result_type) && DataType::IsIntegralType(input_type)) { + __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0)); + } else if (DataType::IsIntegralType(result_type) && DataType::IsFloatingPointType(input_type)) { + CHECK(result_type == DataType::Type::kInt32 || result_type == DataType::Type::kInt64); + __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0)); + } else if (DataType::IsFloatingPointType(result_type) && + DataType::IsFloatingPointType(input_type)) { + __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0)); + } else { + LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type + << " to " << result_type; + } +} + +void LocationsBuilderARM64::VisitUShr(HUShr* ushr) { + HandleShift(ushr); +} + +void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) { + HandleShift(ushr); +} + +void LocationsBuilderARM64::VisitXor(HXor* instruction) { + HandleBinaryOp(instruction); +} + +void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) { + HandleBinaryOp(instruction); +} + +void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, this should be removed during prepare for register allocator. + LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, this should be removed during prepare for register allocator. + LOG(FATAL) << "Unreachable"; +} + +// Simple implementation of packed switch - generate cascaded compare/jumps. 
+void LocationsBuilderARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) { + int32_t lower_bound = switch_instr->GetStartValue(); + uint32_t num_entries = switch_instr->GetNumEntries(); + Register value_reg = InputRegisterAt(switch_instr, 0); + HBasicBlock* default_block = switch_instr->GetDefaultBlock(); + + // Roughly set 16 as max average assemblies generated per HIR in a graph. + static constexpr int32_t kMaxExpectedSizePerHInstruction = 16 * kInstructionSize; + // ADR has a limited range(+/-1MB), so we set a threshold for the number of HIRs in the graph to + // make sure we don't emit it if the target may run out of range. + // TODO: Instead of emitting all jump tables at the end of the code, we could keep track of ADR + // ranges and emit the tables only as required. + static constexpr int32_t kJumpTableInstructionThreshold = 1* MB / kMaxExpectedSizePerHInstruction; + + if (num_entries <= kPackedSwitchCompareJumpThreshold || + // Current instruction id is an upper bound of the number of HIRs in the graph. + GetGraph()->GetCurrentInstructionId() > kJumpTableInstructionThreshold) { + // Create a series of compare/jumps. + UseScratchRegisterScope temps(codegen_->GetVIXLAssembler()); + Register temp = temps.AcquireW(); + __ Subs(temp, value_reg, Operand(lower_bound)); + + const ArenaVector& successors = switch_instr->GetBlock()->GetSuccessors(); + // Jump to successors[0] if value == lower_bound. + __ B(eq, codegen_->GetLabelOf(successors[0])); + int32_t last_index = 0; + for (; num_entries - last_index > 2; last_index += 2) { + __ Subs(temp, temp, Operand(2)); + // Jump to successors[last_index + 1] if value < case_value[last_index + 2]. 
+ __ B(lo, codegen_->GetLabelOf(successors[last_index + 1])); + // Jump to successors[last_index + 2] if value == case_value[last_index + 2]. + __ B(eq, codegen_->GetLabelOf(successors[last_index + 2])); + } + if (num_entries - last_index == 2) { + // The last missing case_value. + __ Cmp(temp, Operand(1)); + __ B(eq, codegen_->GetLabelOf(successors[last_index + 1])); + } + + // And the default for any other value. + if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) { + __ B(codegen_->GetLabelOf(default_block)); + } + } else { + JumpTableARM64* jump_table = codegen_->CreateJumpTable(switch_instr); + + UseScratchRegisterScope temps(codegen_->GetVIXLAssembler()); + + // Below instructions should use at most one blocked register. Since there are two blocked + // registers, we are free to block one. + Register temp_w = temps.AcquireW(); + Register index; + // Remove the bias. + if (lower_bound != 0) { + index = temp_w; + __ Sub(index, value_reg, Operand(lower_bound)); + } else { + index = value_reg; + } + + // Jump to default block if index is out of the range. + __ Cmp(index, Operand(num_entries)); + __ B(hs, codegen_->GetLabelOf(default_block)); + + // In current VIXL implementation, it won't require any blocked registers to encode the + // immediate value for Adr. So we are free to use both VIXL blocked registers to reduce the + // register pressure. + Register table_base = temps.AcquireX(); + // Load jump offset from the table. + __ Adr(table_base, jump_table->GetTableStartLabel()); + Register jump_offset = temp_w; + __ Ldr(jump_offset, MemOperand(table_base, index, UXTW, 2)); + + // Jump to target block by branching to table_base(pc related) + offset. 
+ Register target_address = table_base; + __ Add(target_address, table_base, Operand(jump_offset, SXTW)); + __ Br(target_address); + } +} + +void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister( + HInstruction* instruction, + Location out, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option) { + DataType::Type type = DataType::Type::kReference; + Register out_reg = RegisterFrom(out, type); + if (read_barrier_option == kWithReadBarrier) { + CHECK(kEmitCompilerReadBarrier); + if (kUseBakerReadBarrier) { + // Load with fast path based Baker's read barrier. + // /* HeapReference */ out = *(out + offset) + codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction, + out, + out_reg, + offset, + maybe_temp, + /* needs_null_check= */ false, + /* use_load_acquire= */ false); + } else { + // Load with slow path based read barrier. + // Save the value of `out` into `maybe_temp` before overwriting it + // in the following move operation, as we will need it for the + // read barrier below. + Register temp_reg = RegisterFrom(maybe_temp, type); + __ Mov(temp_reg, out_reg); + // /* HeapReference */ out = *(out + offset) + __ Ldr(out_reg, HeapOperand(out_reg, offset)); + codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset); + } + } else { + // Plain load with no read barrier. 
+ // /* HeapReference */ out = *(out + offset) + __ Ldr(out_reg, HeapOperand(out_reg, offset)); + GetAssembler()->MaybeUnpoisonHeapReference(out_reg); + } +} + +void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters( + HInstruction* instruction, + Location out, + Location obj, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option) { + DataType::Type type = DataType::Type::kReference; + Register out_reg = RegisterFrom(out, type); + Register obj_reg = RegisterFrom(obj, type); + if (read_barrier_option == kWithReadBarrier) { + CHECK(kEmitCompilerReadBarrier); + if (kUseBakerReadBarrier) { + // Load with fast path based Baker's read barrier. + // /* HeapReference */ out = *(obj + offset) + codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction, + out, + obj_reg, + offset, + maybe_temp, + /* needs_null_check= */ false, + /* use_load_acquire= */ false); + } else { + // Load with slow path based read barrier. + // /* HeapReference */ out = *(obj + offset) + __ Ldr(out_reg, HeapOperand(obj_reg, offset)); + codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset); + } + } else { + // Plain load with no read barrier. + // /* HeapReference */ out = *(obj + offset) + __ Ldr(out_reg, HeapOperand(obj_reg, offset)); + GetAssembler()->MaybeUnpoisonHeapReference(out_reg); + } +} + +void CodeGeneratorARM64::GenerateGcRootFieldLoad( + HInstruction* instruction, + Location root, + Register obj, + uint32_t offset, + vixl::aarch64::Label* fixup_label, + ReadBarrierOption read_barrier_option) { + DCHECK(fixup_label == nullptr || offset == 0u); + Register root_reg = RegisterFrom(root, DataType::Type::kReference); + if (read_barrier_option == kWithReadBarrier) { + DCHECK(kEmitCompilerReadBarrier); + if (kUseBakerReadBarrier) { + // Fast path implementation of art::ReadBarrier::BarrierForRoot when + // Baker's read barrier are used. 
+ + // Query `art::Thread::Current()->GetIsGcMarking()` (stored in + // the Marking Register) to decide whether we need to enter + // the slow path to mark the GC root. + // + // We use shared thunks for the slow path; shared within the method + // for JIT, across methods for AOT. That thunk checks the reference + // and jumps to the entrypoint if needed. + // + // lr = &return_address; + // GcRoot root = *(obj+offset); // Original reference load. + // if (mr) { // Thread::Current()->GetIsGcMarking() + // goto gc_root_thunk(lr) + // } + // return_address: + + UseScratchRegisterScope temps(GetVIXLAssembler()); + DCHECK(temps.IsAvailable(ip0)); + DCHECK(temps.IsAvailable(ip1)); + temps.Exclude(ip0, ip1); + uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode()); + + ExactAssemblyScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize); + vixl::aarch64::Label return_address; + __ adr(lr, &return_address); + if (fixup_label != nullptr) { + __ bind(fixup_label); + } + static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8, + "GC root LDR must be 2 instructions (8B) before the return address label."); + __ ldr(root_reg, MemOperand(obj.X(), offset)); + EmitBakerReadBarrierCbnz(custom_data); + __ bind(&return_address); + } else { + // GC root loaded through a slow path for read barriers other + // than Baker's. + // /* GcRoot* */ root = obj + offset + if (fixup_label == nullptr) { + __ Add(root_reg.X(), obj.X(), offset); + } else { + EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X()); + } + // /* mirror::Object* */ root = root->Read() + GenerateReadBarrierForRootSlow(instruction, root, root); + } + } else { + // Plain GC root load with no read barrier. 
+ // /* GcRoot */ root = *(obj + offset) + if (fixup_label == nullptr) { + __ Ldr(root_reg, MemOperand(obj, offset)); + } else { + EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X()); + } + // Note that GC roots are not affected by heap poisoning, thus we + // do not have to unpoison `root_reg` here. + } + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); +} + +void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier( + vixl::aarch64::Register marked, + vixl::aarch64::Register old_value) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR. + uint32_t custom_data = EncodeBakerReadBarrierGcRootData(marked.GetCode()); + + ExactAssemblyScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize); + vixl::aarch64::Label return_address; + __ adr(lr, &return_address); + static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8, + "GC root LDR must be 2 instructions (8B) before the return address label."); + __ mov(marked, old_value); + EmitBakerReadBarrierCbnz(custom_data); + __ bind(&return_address); +} + +void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + vixl::aarch64::Register obj, + const vixl::aarch64::MemOperand& src, + bool needs_null_check, + bool use_load_acquire) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the + // Marking Register) to decide whether we need to enter the slow + // path to mark the reference. Then, in the slow path, check the + // gray bit in the lock word of the reference's holder (`obj`) to + // decide whether to mark `ref` or not. + // + // We use shared thunks for the slow path; shared within the method + // for JIT, across methods for AOT. That thunk checks the holder + // and jumps to the entrypoint if needed. 
If the holder is not gray, + // it creates a fake dependency and returns to the LDR instruction. + // + // lr = &gray_return_address; + // if (mr) { // Thread::Current()->GetIsGcMarking() + // goto field_thunk(lr) + // } + // not_gray_return_address: + // // Original reference load. If the offset is too large to fit + // // into LDR, we use an adjusted base register here. + // HeapReference reference = *(obj+offset); + // gray_return_address: + + DCHECK(src.GetAddrMode() == vixl::aarch64::Offset); + DCHECK_ALIGNED(src.GetOffset(), sizeof(mirror::HeapReference)); + + UseScratchRegisterScope temps(GetVIXLAssembler()); + DCHECK(temps.IsAvailable(ip0)); + DCHECK(temps.IsAvailable(ip1)); + temps.Exclude(ip0, ip1); + uint32_t custom_data = use_load_acquire + ? EncodeBakerReadBarrierAcquireData(src.GetBaseRegister().GetCode(), obj.GetCode()) + : EncodeBakerReadBarrierFieldData(src.GetBaseRegister().GetCode(), obj.GetCode()); + + { + ExactAssemblyScope guard(GetVIXLAssembler(), + (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize); + vixl::aarch64::Label return_address; + __ adr(lr, &return_address); + EmitBakerReadBarrierCbnz(custom_data); + static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4), + "Field LDR must be 1 instruction (4B) before the return address label; " + " 2 instructions (8B) for heap poisoning."); + Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); + if (use_load_acquire) { + DCHECK_EQ(src.GetOffset(), 0); + __ ldar(ref_reg, src); + } else { + __ ldr(ref_reg, src); + } + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + // Unpoison the reference explicitly if needed. MaybeUnpoisonHeapReference() uses + // macro instructions disallowed in ExactAssemblyScope. 
+ if (kPoisonHeapReferences) { + __ neg(ref_reg, Operand(ref_reg)); + } + __ bind(&return_address); + } + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1)); +} + +void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + Register obj, + uint32_t offset, + Location maybe_temp, + bool needs_null_check, + bool use_load_acquire) { + DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference)); + Register base = obj; + if (use_load_acquire) { + DCHECK(maybe_temp.IsRegister()); + base = WRegisterFrom(maybe_temp); + __ Add(base, obj, offset); + offset = 0u; + } else if (offset >= kReferenceLoadMinFarOffset) { + DCHECK(maybe_temp.IsRegister()); + base = WRegisterFrom(maybe_temp); + static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2."); + __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u))); + offset &= (kReferenceLoadMinFarOffset - 1u); + } + MemOperand src(base.X(), offset); + GenerateFieldLoadWithBakerReadBarrier( + instruction, ref, obj, src, needs_null_check, use_load_acquire); +} + +void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction, + Location ref, + Register obj, + uint32_t data_offset, + Location index, + bool needs_null_check) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + static_assert( + sizeof(mirror::HeapReference) == sizeof(int32_t), + "art::mirror::HeapReference and int32_t have different sizes."); + size_t scale_factor = DataType::SizeShift(DataType::Type::kReference); + + // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the + // Marking Register) to decide whether we need to enter the slow + // path to mark the reference. Then, in the slow path, check the + // gray bit in the lock word of the reference's holder (`obj`) to + // decide whether to mark `ref` or not. 
+ // + // We use shared thunks for the slow path; shared within the method + // for JIT, across methods for AOT. That thunk checks the holder + // and jumps to the entrypoint if needed. If the holder is not gray, + // it creates a fake dependency and returns to the LDR instruction. + // + // lr = &gray_return_address; + // if (mr) { // Thread::Current()->GetIsGcMarking() + // goto array_thunk(lr) + // } + // not_gray_return_address: + // // Original reference load. If the offset is too large to fit + // // into LDR, we use an adjusted base register here. + // HeapReference reference = data[index]; + // gray_return_address: + + DCHECK(index.IsValid()); + Register index_reg = RegisterFrom(index, DataType::Type::kInt32); + Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); + + UseScratchRegisterScope temps(GetVIXLAssembler()); + DCHECK(temps.IsAvailable(ip0)); + DCHECK(temps.IsAvailable(ip1)); + temps.Exclude(ip0, ip1); + + Register temp; + if (instruction->GetArray()->IsIntermediateAddress()) { + // We do not need to compute the intermediate address from the array: the + // input instruction has done it already. See the comment in + // `TryExtractArrayAccessAddress()`. + if (kIsDebugBuild) { + HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress(); + DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset); + } + temp = obj; + } else { + temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0)); + __ Add(temp.X(), obj.X(), Operand(data_offset)); + } + + uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode()); + + { + ExactAssemblyScope guard(GetVIXLAssembler(), + (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize); + vixl::aarch64::Label return_address; + __ adr(lr, &return_address); + EmitBakerReadBarrierCbnz(custom_data); + static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? 
-8 : -4), + "Array LDR must be 1 instruction (4B) before the return address label; " + " 2 instructions (8B) for heap poisoning."); + __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor)); + DCHECK(!needs_null_check); // The thunk cannot handle the null check. + // Unpoison the reference explicitly if needed. MaybeUnpoisonHeapReference() uses + // macro instructions disallowed in ExactAssemblyScope. + if (kPoisonHeapReferences) { + __ neg(ref_reg, Operand(ref_reg)); + } + __ bind(&return_address); + } + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1)); +} + +void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { + // The following condition is a compile-time one, so it does not have a run-time cost. + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) { + // The following condition is a run-time one; it is executed after the + // previous compile-time test, to avoid penalizing non-debug builds. + if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register temp = temp_loc.IsValid() ? WRegisterFrom(temp_loc) : temps.AcquireW(); + GetAssembler()->GenerateMarkingRegisterCheck(temp, code); + } + } +} + +void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) { + DCHECK(kEmitCompilerReadBarrier); + + // Insert a slow path based read barrier *after* the reference load. + // + // If heap poisoning is enabled, the unpoisoning of the loaded + // reference will be carried out by the runtime within the slow + // path. + // + // Note that `ref` currently does not get unpoisoned (when heap + // poisoning is enabled), which is alright as the `ref` argument is + // not used by the artReadBarrierSlow entry point. + // + // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. 
+ SlowPathCodeARM64* slow_path = new (GetScopedAllocator()) + ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index); + AddSlowPath(slow_path); + + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); +} + +void CodeGeneratorARM64::MaybeGenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) { + if (kEmitCompilerReadBarrier) { + // Baker's read barriers shall be handled by the fast path + // (CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier). + DCHECK(!kUseBakerReadBarrier); + // If heap poisoning is enabled, unpoisoning will be taken care of + // by the runtime within the slow path. + GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index); + } else if (kPoisonHeapReferences) { + GetAssembler()->UnpoisonHeapReference(WRegisterFrom(out)); + } +} + +void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instruction, + Location out, + Location root) { + DCHECK(kEmitCompilerReadBarrier); + + // Insert a slow path based read barrier *after* the GC root load. + // + // Note that GC roots are not affected by heap poisoning, so we do + // not need to do anything special for this here. 
+ SlowPathCodeARM64* slow_path = + new (GetScopedAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root); + AddSlowPath(slow_path); + + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); +} + +void LocationsBuilderARM64::VisitClassTableGet(HClassTableGet* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instruction) { + LocationSummary* locations = instruction->GetLocations(); + if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) { + uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + instruction->GetIndex(), kArm64PointerSize).SizeValue(); + __ Ldr(XRegisterFrom(locations->Out()), + MemOperand(XRegisterFrom(locations->InAt(0)), method_offset)); + } else { + uint32_t method_offset = static_cast(ImTable::OffsetOfElement( + instruction->GetIndex(), kArm64PointerSize)); + __ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)), + mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value())); + __ Ldr(XRegisterFrom(locations->Out()), + MemOperand(XRegisterFrom(locations->Out()), method_offset)); + } +} + +static void PatchJitRootUse(uint8_t* code, + const uint8_t* roots_data, + vixl::aarch64::Literal* literal, + uint64_t index_in_table) { + uint32_t literal_offset = literal->GetOffset(); + uintptr_t address = + reinterpret_cast(roots_data) + index_in_table * sizeof(GcRoot); + uint8_t* data = code + literal_offset; + reinterpret_cast(data)[0] = dchecked_integral_cast(address); +} + +void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) { + for (const auto& entry : jit_string_patches_) { + const StringReference& string_reference = entry.first; + vixl::aarch64::Literal* 
table_entry_literal = entry.second; + uint64_t index_in_table = GetJitStringRootIndex(string_reference); + PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); + } + for (const auto& entry : jit_class_patches_) { + const TypeReference& type_reference = entry.first; + vixl::aarch64::Literal* table_entry_literal = entry.second; + uint64_t index_in_table = GetJitClassRootIndex(type_reference); + PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); + } +} + +#undef __ +#undef QUICK_ENTRY_POINT + +#define __ assembler.GetVIXLAssembler()-> + +static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler, + vixl::aarch64::Register base_reg, + vixl::aarch64::MemOperand& lock_word, + vixl::aarch64::Label* slow_path, + vixl::aarch64::Label* throw_npe = nullptr) { + // Load the lock word containing the rb_state. + __ Ldr(ip0.W(), lock_word); + // Given the numeric representation, it's enough to check the low bit of the rb_state. + static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0"); + static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); + __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path); + static_assert( + BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET, + "Field and array LDR offsets must be the same to reuse the same code."); + // To throw NPE, we return to the fast path; the artificial dependence below does not matter. + if (throw_npe != nullptr) { + __ Bind(throw_npe); + } + // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning). + static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? 
-8 : -4), + "Field LDR must be 1 instruction (4B) before the return address label; " + " 2 instructions (8B) for heap poisoning."); + __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET); + // Introduce a dependency on the lock_word including rb_state, + // to prevent load-load reordering, and without using + // a memory barrier (which would be more expensive). + __ Add(base_reg, base_reg, Operand(ip0, LSR, 32)); + __ Br(lr); // And return back to the function. + // Note: The fake dependency is unnecessary for the slow path. +} + +// Load the read barrier introspection entrypoint in register `entrypoint`. +static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler, + vixl::aarch64::Register entrypoint) { + // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection. + DCHECK_EQ(ip0.GetCode(), 16u); + const int32_t entry_point_offset = + Thread::ReadBarrierMarkEntryPointsOffset(ip0.GetCode()); + __ Ldr(entrypoint, MemOperand(tr, entry_point_offset)); +} + +void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler, + uint32_t encoded_data, + /*out*/ std::string* debug_name) { + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + switch (kind) { + case BakerReadBarrierKind::kField: + case BakerReadBarrierKind::kAcquire: { + auto base_reg = + Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + auto holder_reg = + Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data)); + CheckValidReg(holder_reg.GetCode()); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip0, ip1); + // In the case of a field load (with relaxed semantic), if `base_reg` differs from + // `holder_reg`, the offset was too large and we must have emitted (during the construction + // of the HIR graph, see `art::HInstructionBuilder::BuildInstanceFieldAccess`) and preserved + 
// (see `art::PrepareForRegisterAllocation::VisitNullCheck`) an explicit null check before + // the load. Otherwise, for implicit null checks, we need to null-check the holder as we do + // not necessarily do that check before going to the thunk. + // + // In the case of a field load with load-acquire semantics (where `base_reg` always differs + // from `holder_reg`), we also need an explicit null check when implicit null checks are + // allowed, as we do not emit one before going to the thunk. + vixl::aarch64::Label throw_npe_label; + vixl::aarch64::Label* throw_npe = nullptr; + if (GetCompilerOptions().GetImplicitNullChecks() && + (holder_reg.Is(base_reg) || (kind == BakerReadBarrierKind::kAcquire))) { + throw_npe = &throw_npe_label; + __ Cbz(holder_reg.W(), throw_npe); + } + // Check if the holder is gray and, if not, add fake dependency to the base register + // and return to the LDR instruction to load the reference. Otherwise, use introspection + // to load the reference and call the entrypoint that performs further checks on the + // reference and marks it if needed. + vixl::aarch64::Label slow_path; + MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value()); + EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, throw_npe); + __ Bind(&slow_path); + if (kind == BakerReadBarrierKind::kField) { + MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET); + __ Ldr(ip0.W(), ldr_address); // Load the LDR (immediate) unsigned offset. + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); + __ Ubfx(ip0.W(), ip0.W(), 10, 12); // Extract the offset. + __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2)); // Load the reference. + } else { + DCHECK(kind == BakerReadBarrierKind::kAcquire); + DCHECK(!base_reg.Is(holder_reg)); + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); + __ Ldar(ip0.W(), MemOperand(base_reg)); + } + // Do not unpoison. 
With heap poisoning enabled, the entrypoint expects a poisoned reference. + __ Br(ip1); // Jump to the entrypoint. + break; + } + case BakerReadBarrierKind::kArray: { + auto base_reg = + Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip0, ip1); + vixl::aarch64::Label slow_path; + int32_t data_offset = + mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value(); + MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset); + DCHECK_LT(lock_word.GetOffset(), 0); + EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path); + __ Bind(&slow_path); + MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET); + __ Ldr(ip0.W(), ldr_address); // Load the LDR (register) unsigned offset. + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); + __ Ubfx(ip0, ip0, 16, 6); // Extract the index register, plus 32 (bit 21 is set). + __ Bfi(ip1, ip0, 3, 6); // Insert ip0 to the entrypoint address to create + // a switch case target based on the index register. + __ Mov(ip0, base_reg); // Move the base register to ip0. + __ Br(ip1); // Jump to the entrypoint's array switch case. + break; + } + case BakerReadBarrierKind::kGcRoot: { + // Check if the reference needs to be marked and if so (i.e. not null, not marked yet + // and it does not have a forwarding address), call the correct introspection entrypoint; + // otherwise return the reference (or the extracted forwarding address). + // There is no gray bit check for GC roots. 
+ auto root_reg = + Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(root_reg.GetCode()); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip0, ip1); + vixl::aarch64::Label return_label, not_marked, forwarding_address; + __ Cbz(root_reg, &return_label); + MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value()); + __ Ldr(ip0.W(), lock_word); + __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, ¬_marked); + __ Bind(&return_label); + __ Br(lr); + __ Bind(¬_marked); + __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1)); + __ B(&forwarding_address, mi); + LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1); + // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to + // art_quick_read_barrier_mark_introspection_gc_roots. + __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET)); + __ Mov(ip0.W(), root_reg); + __ Br(ip1); + __ Bind(&forwarding_address); + __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift); + __ Br(lr); + break; + } + default: + LOG(FATAL) << "Unexpected kind: " << static_cast(kind); + UNREACHABLE(); + } + + // For JIT, the slow path is considered part of the compiled method, + // so JIT should pass null as `debug_name`. Tests may not have a runtime. 
+ DCHECK(Runtime::Current() == nullptr || + !Runtime::Current()->UseJitCompilation() || + debug_name == nullptr); + if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) { + std::ostringstream oss; + oss << "BakerReadBarrierThunk"; + switch (kind) { + case BakerReadBarrierKind::kField: + oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data) + << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data); + break; + case BakerReadBarrierKind::kAcquire: + oss << "Acquire_r" << BakerReadBarrierFirstRegField::Decode(encoded_data) + << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data); + break; + case BakerReadBarrierKind::kArray: + oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + break; + case BakerReadBarrierKind::kGcRoot: + oss << "GcRoot_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + break; + } + *debug_name = oss.str(); + } +} + +#undef __ + +} // namespace arm64 +} // namespace art diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h new file mode 100644 index 0000000..6b2c805 --- /dev/null +++ b/compiler/optimizing/code_generator_arm64.h @@ -0,0 +1,962 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_ +#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_ + +#include "base/bit_field.h" +#include "code_generator.h" +#include "common_arm64.h" +#include "dex/dex_file_types.h" +#include "dex/string_reference.h" +#include "dex/type_reference.h" +#include "driver/compiler_options.h" +#include "nodes.h" +#include "parallel_move_resolver.h" +#include "utils/arm64/assembler_arm64.h" + +// TODO(VIXL): Make VIXL compile with -Wshadow. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" +#include "aarch64/disasm-aarch64.h" +#include "aarch64/macro-assembler-aarch64.h" +#pragma GCC diagnostic pop + +namespace art { + +namespace linker { +class Arm64RelativePatcherTest; +} // namespace linker + +namespace arm64 { + +class CodeGeneratorARM64; + +// Use a local definition to prevent copying mistakes. +static constexpr size_t kArm64WordSize = static_cast(kArm64PointerSize); + +// These constants are used as an approximate margin when emission of veneer and literal pools +// must be blocked. 
+static constexpr int kMaxMacroInstructionSizeInBytes = 15 * vixl::aarch64::kInstructionSize; +static constexpr int kInvokeCodeMarginSizeInBytes = 6 * kMaxMacroInstructionSizeInBytes; + +static const vixl::aarch64::Register kParameterCoreRegisters[] = { + vixl::aarch64::x1, + vixl::aarch64::x2, + vixl::aarch64::x3, + vixl::aarch64::x4, + vixl::aarch64::x5, + vixl::aarch64::x6, + vixl::aarch64::x7 +}; +static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters); +static const vixl::aarch64::VRegister kParameterFPRegisters[] = { + vixl::aarch64::d0, + vixl::aarch64::d1, + vixl::aarch64::d2, + vixl::aarch64::d3, + vixl::aarch64::d4, + vixl::aarch64::d5, + vixl::aarch64::d6, + vixl::aarch64::d7 +}; +static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters); + +// Thread Register. +const vixl::aarch64::Register tr = vixl::aarch64::x19; +// Marking Register. +const vixl::aarch64::Register mr = vixl::aarch64::x20; +// Method register on invoke. +static const vixl::aarch64::Register kArtMethodRegister = vixl::aarch64::x0; +const vixl::aarch64::CPURegList vixl_reserved_core_registers(vixl::aarch64::ip0, + vixl::aarch64::ip1); +const vixl::aarch64::CPURegList vixl_reserved_fp_registers(vixl::aarch64::d31); + +const vixl::aarch64::CPURegList runtime_reserved_core_registers = + vixl::aarch64::CPURegList( + tr, + // Reserve X20 as Marking Register when emitting Baker read barriers. + ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg), + vixl::aarch64::lr); + +// Some instructions have special requirements for a temporary, for example +// LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barrier require +// temp that's not an R0 (to avoid an extra move) and Baker read barrier field +// loads with large offsets need a fixed register to limit the number of link-time +// thunks we generate. 
For these and similar cases, we want to reserve a specific +// register that's neither callee-save nor an argument register. We choose x15. +inline Location FixedTempLocation() { + return Location::RegisterLocation(vixl::aarch64::x15.GetCode()); +} + +// Callee-save registers AAPCS64, without x19 (Thread Register) (nor +// x20 (Marking Register) when emitting Baker read barriers). +const vixl::aarch64::CPURegList callee_saved_core_registers( + vixl::aarch64::CPURegister::kRegister, + vixl::aarch64::kXRegSize, + ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) + ? vixl::aarch64::x21.GetCode() + : vixl::aarch64::x20.GetCode()), + vixl::aarch64::x30.GetCode()); +const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kVRegister, + vixl::aarch64::kDRegSize, + vixl::aarch64::d8.GetCode(), + vixl::aarch64::d15.GetCode()); +Location ARM64ReturnLocation(DataType::Type return_type); + +class SlowPathCodeARM64 : public SlowPathCode { + public: + explicit SlowPathCodeARM64(HInstruction* instruction) + : SlowPathCode(instruction), entry_label_(), exit_label_() {} + + vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; } + vixl::aarch64::Label* GetExitLabel() { return &exit_label_; } + + void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override; + void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override; + + private: + vixl::aarch64::Label entry_label_; + vixl::aarch64::Label exit_label_; + + DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64); +}; + +class JumpTableARM64 : public DeletableArenaObject { + public: + explicit JumpTableARM64(HPackedSwitch* switch_instr) + : switch_instr_(switch_instr), table_start_() {} + + vixl::aarch64::Label* GetTableStartLabel() { return &table_start_; } + + void EmitTable(CodeGeneratorARM64* codegen); + + private: + HPackedSwitch* const switch_instr_; + vixl::aarch64::Label table_start_; + + DISALLOW_COPY_AND_ASSIGN(JumpTableARM64); +}; + +static const 
vixl::aarch64::Register kRuntimeParameterCoreRegisters[] = + { vixl::aarch64::x0, + vixl::aarch64::x1, + vixl::aarch64::x2, + vixl::aarch64::x3, + vixl::aarch64::x4, + vixl::aarch64::x5, + vixl::aarch64::x6, + vixl::aarch64::x7 }; +static constexpr size_t kRuntimeParameterCoreRegistersLength = + arraysize(kRuntimeParameterCoreRegisters); +static const vixl::aarch64::VRegister kRuntimeParameterFpuRegisters[] = + { vixl::aarch64::d0, + vixl::aarch64::d1, + vixl::aarch64::d2, + vixl::aarch64::d3, + vixl::aarch64::d4, + vixl::aarch64::d5, + vixl::aarch64::d6, + vixl::aarch64::d7 }; +static constexpr size_t kRuntimeParameterFpuRegistersLength = + arraysize(kRuntimeParameterCoreRegisters); + +class InvokeRuntimeCallingConvention : public CallingConvention { + public: + static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters); + + InvokeRuntimeCallingConvention() + : CallingConvention(kRuntimeParameterCoreRegisters, + kRuntimeParameterCoreRegistersLength, + kRuntimeParameterFpuRegisters, + kRuntimeParameterFpuRegistersLength, + kArm64PointerSize) {} + + Location GetReturnLocation(DataType::Type return_type); + + private: + DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); +}; + +class InvokeDexCallingConvention : public CallingConvention { + public: + InvokeDexCallingConvention() + : CallingConvention(kParameterCoreRegisters, + kParameterCoreRegistersLength, + kParameterFPRegisters, + kParameterFPRegistersLength, + kArm64PointerSize) {} + + Location GetReturnLocation(DataType::Type return_type) const { + return ARM64ReturnLocation(return_type); + } + + + private: + DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention); +}; + +class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor { + public: + InvokeDexCallingConventionVisitorARM64() {} + virtual ~InvokeDexCallingConventionVisitorARM64() {} + + Location GetNextLocation(DataType::Type type) override; + Location GetReturnLocation(DataType::Type 
return_type) const override { + return calling_convention.GetReturnLocation(return_type); + } + Location GetMethodLocation() const override; + + private: + InvokeDexCallingConvention calling_convention; + + DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64); +}; + +class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention { + public: + FieldAccessCallingConventionARM64() {} + + Location GetObjectLocation() const override { + return helpers::LocationFrom(vixl::aarch64::x1); + } + Location GetFieldIndexLocation() const override { + return helpers::LocationFrom(vixl::aarch64::x0); + } + Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { + return helpers::LocationFrom(vixl::aarch64::x0); + } + Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED, + bool is_instance) const override { + return is_instance + ? helpers::LocationFrom(vixl::aarch64::x2) + : helpers::LocationFrom(vixl::aarch64::x1); + } + Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override { + return helpers::LocationFrom(vixl::aarch64::d0); + } + + private: + DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64); +}; + +class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator { + public: + InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen); + +#define DECLARE_VISIT_INSTRUCTION(name, super) \ + void Visit##name(H##name* instr) override; + + FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION) + +#undef DECLARE_VISIT_INSTRUCTION + + void VisitInstruction(HInstruction* instruction) override { + LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() + << " (id " << instruction->GetId() << ")"; + } + + Arm64Assembler* GetAssembler() const { return assembler_; } + vixl::aarch64::MacroAssembler* GetVIXLAssembler() { 
return GetAssembler()->GetVIXLAssembler(); } + + private: + void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, + vixl::aarch64::Register class_reg); + void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, + vixl::aarch64::Register temp); + void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor); + void HandleBinaryOp(HBinaryOperation* instr); + + void HandleFieldSet(HInstruction* instruction, + const FieldInfo& field_info, + bool value_can_be_null); + void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info); + void HandleCondition(HCondition* instruction); + + // Generate a heap reference load using one register `out`: + // + // out <- *(out + offset) + // + // while honoring heap poisoning and/or read barriers (if any). + // + // Location `maybe_temp` is used when generating a read barrier and + // shall be a register in that case; it may be an invalid location + // otherwise. + void GenerateReferenceLoadOneRegister(HInstruction* instruction, + Location out, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option); + // Generate a heap reference load using two different registers + // `out` and `obj`: + // + // out <- *(obj + offset) + // + // while honoring heap poisoning and/or read barriers (if any). + // + // Location `maybe_temp` is used when generating a Baker's (fast + // path) read barrier and shall be a register in that case; it may + // be an invalid location otherwise. + void GenerateReferenceLoadTwoRegisters(HInstruction* instruction, + Location out, + Location obj, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option); + + // Generate a floating-point comparison. 
+ void GenerateFcmp(HInstruction* instruction); + + void HandleShift(HBinaryOperation* instr); + void GenerateTestAndBranch(HInstruction* instruction, + size_t condition_input_index, + vixl::aarch64::Label* true_target, + vixl::aarch64::Label* false_target); + void DivRemOneOrMinusOne(HBinaryOperation* instruction); + void DivRemByPowerOfTwo(HBinaryOperation* instruction); + void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction); + void GenerateIntDiv(HDiv* instruction); + void GenerateIntDivForConstDenom(HDiv *instruction); + void GenerateIntDivForPower2Denom(HDiv *instruction); + void GenerateIntRem(HRem* instruction); + void GenerateIntRemForConstDenom(HRem *instruction); + void GenerateIntRemForPower2Denom(HRem *instruction); + void HandleGoto(HInstruction* got, HBasicBlock* successor); + + vixl::aarch64::MemOperand VecAddress( + HVecMemoryOperation* instruction, + // This function may acquire a scratch register. + vixl::aarch64::UseScratchRegisterScope* temps_scope, + size_t size, + bool is_string_char_at, + /*out*/ vixl::aarch64::Register* scratch); + + Arm64Assembler* const assembler_; + CodeGeneratorARM64* const codegen_; + + DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64); +}; + +class LocationsBuilderARM64 : public HGraphVisitor { + public: + LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen) + : HGraphVisitor(graph), codegen_(codegen) {} + +#define DECLARE_VISIT_INSTRUCTION(name, super) \ + void Visit##name(H##name* instr) override; + + FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION) + +#undef DECLARE_VISIT_INSTRUCTION + + void VisitInstruction(HInstruction* instruction) override { + LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() + << " (id " << instruction->GetId() << ")"; + } + + private: + void HandleBinaryOp(HBinaryOperation* instr); + void 
HandleFieldSet(HInstruction* instruction); + void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info); + void HandleInvoke(HInvoke* instr); + void HandleCondition(HCondition* instruction); + void HandleShift(HBinaryOperation* instr); + + CodeGeneratorARM64* const codegen_; + InvokeDexCallingConventionVisitorARM64 parameter_visitor_; + + DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64); +}; + +class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap { + public: + ParallelMoveResolverARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen) + : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {} + + protected: + void PrepareForEmitNativeCode() override; + void FinishEmitNativeCode() override; + Location AllocateScratchLocationFor(Location::Kind kind) override; + void FreeScratchLocation(Location loc) override; + void EmitMove(size_t index) override; + + private: + Arm64Assembler* GetAssembler() const; + vixl::aarch64::MacroAssembler* GetVIXLAssembler() const { + return GetAssembler()->GetVIXLAssembler(); + } + + CodeGeneratorARM64* const codegen_; + vixl::aarch64::UseScratchRegisterScope vixl_temps_; + + DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64); +}; + +class CodeGeneratorARM64 : public CodeGenerator { + public: + CodeGeneratorARM64(HGraph* graph, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats = nullptr); + virtual ~CodeGeneratorARM64() {} + + void GenerateFrameEntry() override; + void GenerateFrameExit() override; + + vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const; + vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const; + + void Bind(HBasicBlock* block) override; + + vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) { + block = FirstNonEmptyBlock(block); + return &(block_labels_[block->GetBlockId()]); + } + + size_t GetWordSize() const override { + return kArm64WordSize; + } + + size_t GetSlowPathFPWidth() const override { + return 
GetGraph()->HasSIMD() + ? vixl::aarch64::kQRegSizeInBytes + : vixl::aarch64::kDRegSizeInBytes; + } + + size_t GetCalleePreservedFPWidth() const override { + return vixl::aarch64::kDRegSizeInBytes; + } + + uintptr_t GetAddressOf(HBasicBlock* block) override { + vixl::aarch64::Label* block_entry_label = GetLabelOf(block); + DCHECK(block_entry_label->IsBound()); + return block_entry_label->GetLocation(); + } + + HGraphVisitor* GetLocationBuilder() override { return &location_builder_; } + HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; } + Arm64Assembler* GetAssembler() override { return &assembler_; } + const Arm64Assembler& GetAssembler() const override { return assembler_; } + vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); } + + // Emit a write barrier. + void MarkGCCard(vixl::aarch64::Register object, + vixl::aarch64::Register value, + bool value_can_be_null); + + void GenerateMemoryBarrier(MemBarrierKind kind); + + // Register allocation. + + void SetupBlockedRegisters() const override; + + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + + // The number of registers that can be allocated. The register allocator may + // decide to reserve and not use a few of them. + // We do not consider registers sp, xzr, wzr. They are either not allocatable + // (xzr, wzr), or make for poor allocatable registers (sp alignment + // requirements, etc.). This also facilitates our task as all other registers + // can easily be mapped via to or from their type and index or code. 
+ static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1; + static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfVRegisters; + static constexpr int kNumberOfAllocatableRegisterPairs = 0; + + void DumpCoreRegister(std::ostream& stream, int reg) const override; + void DumpFloatingPointRegister(std::ostream& stream, int reg) const override; + + InstructionSet GetInstructionSet() const override { + return InstructionSet::kArm64; + } + + const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const; + + void Initialize() override { + block_labels_.resize(GetGraph()->GetBlocks().size()); + } + + // We want to use the STP and LDP instructions to spill and restore registers for slow paths. + // These instructions can only encode offsets that are multiples of the register size accessed. + uint32_t GetPreferredSlotsAlignment() const override { return vixl::aarch64::kXRegSizeInBytes; } + + JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) { + jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr)); + return jump_tables_.back().get(); + } + + void Finalize(CodeAllocator* allocator) override; + + // Code generation helpers. 
+ void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant); + void MoveConstant(Location destination, int32_t value) override; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) override; + void AddLocationAsTemp(Location location, LocationSummary* locations) override; + + void Load(DataType::Type type, + vixl::aarch64::CPURegister dst, + const vixl::aarch64::MemOperand& src); + void Store(DataType::Type type, + vixl::aarch64::CPURegister src, + const vixl::aarch64::MemOperand& dst); + void LoadAcquire(HInstruction* instruction, + vixl::aarch64::CPURegister dst, + const vixl::aarch64::MemOperand& src, + bool needs_null_check); + void StoreRelease(HInstruction* instruction, + DataType::Type type, + vixl::aarch64::CPURegister src, + const vixl::aarch64::MemOperand& dst, + bool needs_null_check); + + // Generate code to invoke a runtime entry point. + void InvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path = nullptr) override; + + // Generate code to invoke a runtime entry point, but do not record + // PC-related information in a stack map. + void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset, + HInstruction* instruction, + SlowPathCode* slow_path); + + ParallelMoveResolverARM64* GetMoveResolver() override { return &move_resolver_; } + + bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { + return false; + } + + // Check if the desired_string_load_kind is supported. If it is, return it, + // otherwise return a fall-back kind that should be used instead. + HLoadString::LoadKind GetSupportedLoadStringKind( + HLoadString::LoadKind desired_string_load_kind) override; + + // Check if the desired_class_load_kind is supported. If it is, return it, + // otherwise return a fall-back kind that should be used instead. 
+ HLoadClass::LoadKind GetSupportedLoadClassKind( + HLoadClass::LoadKind desired_class_load_kind) override; + + // Check if the desired_dispatch_info is supported. If it is, return it, + // otherwise return a fall-back info that should be used instead. + HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( + const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, + ArtMethod* method) override; + + void GenerateStaticOrDirectCall( + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; + void GenerateVirtualCall( + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; + + void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED, + DataType::Type type ATTRIBUTE_UNUSED) override { + UNIMPLEMENTED(FATAL); + } + + // Add a new boot image intrinsic patch for an instruction and return the label + // to be bound before the instruction. The instruction will be either the + // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing + // to the associated ADRP patch label). + vixl::aarch64::Label* NewBootImageIntrinsicPatch(uint32_t intrinsic_data, + vixl::aarch64::Label* adrp_label = nullptr); + + // Add a new boot image relocation patch for an instruction and return the label + // to be bound before the instruction. The instruction will be either the + // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing + // to the associated ADRP patch label). + vixl::aarch64::Label* NewBootImageRelRoPatch(uint32_t boot_image_offset, + vixl::aarch64::Label* adrp_label = nullptr); + + // Add a new boot image method patch for an instruction and return the label + // to be bound before the instruction. The instruction will be either the + // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing + // to the associated ADRP patch label). 
+ vixl::aarch64::Label* NewBootImageMethodPatch(MethodReference target_method, + vixl::aarch64::Label* adrp_label = nullptr); + + // Add a new .bss entry method patch for an instruction and return + // the label to be bound before the instruction. The instruction will be + // either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` + // pointing to the associated ADRP patch label). + vixl::aarch64::Label* NewMethodBssEntryPatch(MethodReference target_method, + vixl::aarch64::Label* adrp_label = nullptr); + + // Add a new boot image type patch for an instruction and return the label + // to be bound before the instruction. The instruction will be either the + // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing + // to the associated ADRP patch label). + vixl::aarch64::Label* NewBootImageTypePatch(const DexFile& dex_file, + dex::TypeIndex type_index, + vixl::aarch64::Label* adrp_label = nullptr); + + // Add a new .bss entry type patch for an instruction and return the label + // to be bound before the instruction. The instruction will be either the + // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing + // to the associated ADRP patch label). + vixl::aarch64::Label* NewBssEntryTypePatch(const DexFile& dex_file, + dex::TypeIndex type_index, + vixl::aarch64::Label* adrp_label = nullptr); + + // Add a new boot image string patch for an instruction and return the label + // to be bound before the instruction. The instruction will be either the + // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing + // to the associated ADRP patch label). + vixl::aarch64::Label* NewBootImageStringPatch(const DexFile& dex_file, + dex::StringIndex string_index, + vixl::aarch64::Label* adrp_label = nullptr); + + // Add a new .bss entry string patch for an instruction and return the label + // to be bound before the instruction. 
The instruction will be either the + // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing + // to the associated ADRP patch label). + vixl::aarch64::Label* NewStringBssEntryPatch(const DexFile& dex_file, + dex::StringIndex string_index, + vixl::aarch64::Label* adrp_label = nullptr); + + // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT. + void EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset); + + // Emit the CBNZ instruction for baker read barrier and record + // the associated patch for AOT or slow path for JIT. + void EmitBakerReadBarrierCbnz(uint32_t custom_data); + + vixl::aarch64::Literal* DeduplicateBootImageAddressLiteral(uint64_t address); + vixl::aarch64::Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, + dex::StringIndex string_index, + Handle handle); + vixl::aarch64::Literal* DeduplicateJitClassLiteral(const DexFile& dex_file, + dex::TypeIndex string_index, + Handle handle); + + void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg); + void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label, + vixl::aarch64::Register out, + vixl::aarch64::Register base); + void EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_label, + vixl::aarch64::Register out, + vixl::aarch64::Register base); + + void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference); + void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset); + + void EmitLinkerPatches(ArenaVector* linker_patches) override; + bool NeedsThunkCode(const linker::LinkerPatch& patch) const override; + void EmitThunkCode(const linker::LinkerPatch& patch, + /*out*/ ArenaVector* code, + /*out*/ std::string* debug_name) override; + + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override; + + // Generate a GC root reference load: + // + // root <- *(obj + offset) + // + // while honoring read barriers based on 
read_barrier_option. + void GenerateGcRootFieldLoad(HInstruction* instruction, + Location root, + vixl::aarch64::Register obj, + uint32_t offset, + vixl::aarch64::Label* fixup_label, + ReadBarrierOption read_barrier_option); + // Generate MOV for the `old_value` in UnsafeCASObject and mark it with Baker read barrier. + void GenerateUnsafeCasOldValueMovWithBakerReadBarrier(vixl::aarch64::Register marked, + vixl::aarch64::Register old_value); + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference field load when Baker's read barriers are used. + // Overload suitable for Unsafe.getObject/-Volatile() intrinsic. + void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + vixl::aarch64::Register obj, + const vixl::aarch64::MemOperand& src, + bool needs_null_check, + bool use_load_acquire); + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference field load when Baker's read barriers are used. + void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + vixl::aarch64::Register obj, + uint32_t offset, + Location maybe_temp, + bool needs_null_check, + bool use_load_acquire); + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference array load when Baker's read barriers are used. + void GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction, + Location ref, + vixl::aarch64::Register obj, + uint32_t data_offset, + Location index, + bool needs_null_check); + + // Emit code checking the status of the Marking Register, and + // aborting the program if MR does not match the value stored in the + // art::Thread object. Code is only emitted in debug mode and if + // CompilerOptions::EmitRunTimeChecksInDebugMode returns true. + // + // Argument `code` is used to identify the different occurrences of + // MaybeGenerateMarkingRegisterCheck in the code generator, and is + // passed to the BRK instruction. 
+ // + // If `temp_loc` is a valid location, it is expected to be a + // register and will be used as a temporary to generate code; + // otherwise, a temporary will be fetched from the core register + // scratch pool. + virtual void MaybeGenerateMarkingRegisterCheck(int code, + Location temp_loc = Location::NoLocation()); + + // Generate a read barrier for a heap reference within `instruction` + // using a slow path. + // + // A read barrier for an object reference read from the heap is + // implemented as a call to the artReadBarrierSlow runtime entry + // point, which is passed the values in locations `ref`, `obj`, and + // `offset`: + // + // mirror::Object* artReadBarrierSlow(mirror::Object* ref, + // mirror::Object* obj, + // uint32_t offset); + // + // The `out` location contains the value returned by + // artReadBarrierSlow. + // + // When `index` is provided (i.e. for array accesses), the offset + // value passed to artReadBarrierSlow is adjusted to take `index` + // into account. + void GenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index = Location::NoLocation()); + + // If read barriers are enabled, generate a read barrier for a heap + // reference using a slow path. If heap poisoning is enabled, also + // unpoison the reference in `out`. + void MaybeGenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index = Location::NoLocation()); + + // Generate a read barrier for a GC root within `instruction` using + // a slow path. + // + // A read barrier for an object reference GC root is implemented as + // a call to the artReadBarrierForRootSlow runtime entry point, + // which is passed the value in location `root`: + // + // mirror::Object* artReadBarrierForRootSlow(GcRoot* root); + // + // The `out` location contains the value returned by + // artReadBarrierForRootSlow. 
+ void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root); + + void GenerateNop() override; + + void GenerateImplicitNullCheck(HNullCheck* instruction) override; + void GenerateExplicitNullCheck(HNullCheck* instruction) override; + + void MaybeRecordImplicitNullCheck(HInstruction* instr) final { + // The function must be only called within special scopes + // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of + // veneer/literal pools by VIXL assembler. + CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true) + << "The function must only be called within EmissionCheckScope or ExactAssemblyScope"; + CodeGenerator::MaybeRecordImplicitNullCheck(instr); + } + + void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl::aarch64::Register klass); + void MaybeIncrementHotness(bool is_frame_entry); + + private: + // Encoding of thunk type and data for link-time generated thunks for Baker read barriers. + + enum class BakerReadBarrierKind : uint8_t { + kField, // Field get or array get with constant offset (i.e. constant index). + kAcquire, // Volatile field get. + kArray, // Array get with index in register. + kGcRoot, // GC root load. 
+ kLast = kGcRoot + }; + + static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* sp/zr is invalid */ 31u; + + static constexpr size_t kBitsForBakerReadBarrierKind = + MinimumBitsToStore(static_cast(BakerReadBarrierKind::kLast)); + static constexpr size_t kBakerReadBarrierBitsForRegister = + MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg); + using BakerReadBarrierKindField = + BitField; + using BakerReadBarrierFirstRegField = + BitField; + using BakerReadBarrierSecondRegField = + BitField; + + static void CheckValidReg(uint32_t reg) { + DCHECK(reg < vixl::aarch64::lr.GetCode() && + reg != vixl::aarch64::ip0.GetCode() && + reg != vixl::aarch64::ip1.GetCode()) << reg; + } + + static inline uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, uint32_t holder_reg) { + CheckValidReg(base_reg); + CheckValidReg(holder_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(holder_reg); + } + + static inline uint32_t EncodeBakerReadBarrierAcquireData(uint32_t base_reg, uint32_t holder_reg) { + CheckValidReg(base_reg); + CheckValidReg(holder_reg); + DCHECK_NE(base_reg, holder_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kAcquire) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(holder_reg); + } + + static inline uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) { + CheckValidReg(base_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg); + } + + static inline uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) { + CheckValidReg(root_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) | + BakerReadBarrierFirstRegField::Encode(root_reg) | + 
BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg); + } + + void CompileBakerReadBarrierThunk(Arm64Assembler& assembler, + uint32_t encoded_data, + /*out*/ std::string* debug_name); + + using Uint64ToLiteralMap = ArenaSafeMap*>; + using Uint32ToLiteralMap = ArenaSafeMap*>; + using StringToLiteralMap = ArenaSafeMap*, + StringReferenceValueComparator>; + using TypeToLiteralMap = ArenaSafeMap*, + TypeReferenceValueComparator>; + + vixl::aarch64::Literal* DeduplicateUint32Literal(uint32_t value); + vixl::aarch64::Literal* DeduplicateUint64Literal(uint64_t value); + + // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types, + // whether through .data.bimg.rel.ro, .bss, or directly in the boot image. + struct PcRelativePatchInfo : PatchInfo { + PcRelativePatchInfo(const DexFile* dex_file, uint32_t off_or_idx) + : PatchInfo(dex_file, off_or_idx), pc_insn_label() { } + + vixl::aarch64::Label* pc_insn_label; + }; + + struct BakerReadBarrierPatchInfo { + explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { } + + vixl::aarch64::Label label; + uint32_t custom_data; + }; + + vixl::aarch64::Label* NewPcRelativePatch(const DexFile* dex_file, + uint32_t offset_or_index, + vixl::aarch64::Label* adrp_label, + ArenaDeque* patches); + + void EmitJumpTables(); + + template + static void EmitPcRelativeLinkerPatches(const ArenaDeque& infos, + ArenaVector* linker_patches); + + // Labels for each block that will be compiled. + // We use a deque so that the `vixl::aarch64::Label` objects do not move in memory. + ArenaDeque block_labels_; // Indexed by block id. + vixl::aarch64::Label frame_entry_label_; + ArenaVector> jump_tables_; + + LocationsBuilderARM64 location_builder_; + InstructionCodeGeneratorARM64 instruction_visitor_; + ParallelMoveResolverARM64 move_resolver_; + Arm64Assembler assembler_; + + // PC-relative method patch info for kBootImageLinkTimePcRelative. 
+ ArenaDeque boot_image_method_patches_; + // PC-relative method patch info for kBssEntry. + ArenaDeque method_bss_entry_patches_; + // PC-relative type patch info for kBootImageLinkTimePcRelative. + ArenaDeque boot_image_type_patches_; + // PC-relative type patch info for kBssEntry. + ArenaDeque type_bss_entry_patches_; + // PC-relative String patch info for kBootImageLinkTimePcRelative. + ArenaDeque boot_image_string_patches_; + // PC-relative String patch info for kBssEntry. + ArenaDeque string_bss_entry_patches_; + // PC-relative patch info for IntrinsicObjects for the boot image, + // and for method/type/string patches for kBootImageRelRo otherwise. + ArenaDeque boot_image_other_patches_; + // Patch info for calls to entrypoint dispatch thunks. Used for slow paths. + ArenaDeque> call_entrypoint_patches_; + // Baker read barrier patch info. + ArenaDeque baker_read_barrier_patches_; + + // Deduplication map for 32-bit literals, used for JIT for boot image addresses. + Uint32ToLiteralMap uint32_literals_; + // Deduplication map for 64-bit literals, used for JIT for method address or method code. + Uint64ToLiteralMap uint64_literals_; + // Patches for string literals in JIT compiled code. + StringToLiteralMap jit_string_patches_; + // Patches for class literals in JIT compiled code. + TypeToLiteralMap jit_class_patches_; + + // Baker read barrier slow paths, mapping custom data (uint32_t) to label. + // Wrap the label to work around vixl::aarch64::Label being non-copyable + // and non-moveable and as such unusable in ArenaSafeMap<>. 
+ struct LabelWrapper { + LabelWrapper(const LabelWrapper& src) + : label() { + DCHECK(!src.label.IsLinked() && !src.label.IsBound()); + } + LabelWrapper() = default; + vixl::aarch64::Label label; + }; + ArenaSafeMap jit_baker_read_barrier_slow_paths_; + + friend class linker::Arm64RelativePatcherTest; + DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64); +}; + +inline Arm64Assembler* ParallelMoveResolverARM64::GetAssembler() const { + return codegen_->GetAssembler(); +} + +} // namespace arm64 +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_ diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc new file mode 100644 index 0000000..3a2cf40 --- /dev/null +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -0,0 +1,9758 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "code_generator_arm_vixl.h" + +#include "arch/arm/asm_support_arm.h" +#include "arch/arm/instruction_set_features_arm.h" +#include "art_method-inl.h" +#include "base/bit_utils.h" +#include "base/bit_utils_iterator.h" +#include "class_table.h" +#include "code_generator_utils.h" +#include "common_arm.h" +#include "compiled_method.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "gc/accounting/card_table.h" +#include "gc/space/image_space.h" +#include "heap_poisoning.h" +#include "intrinsics.h" +#include "intrinsics_arm_vixl.h" +#include "linker/linker_patch.h" +#include "mirror/array-inl.h" +#include "mirror/class-inl.h" +#include "scoped_thread_state_change-inl.h" +#include "thread.h" +#include "utils/arm/assembler_arm_vixl.h" +#include "utils/arm/managed_register_arm.h" +#include "utils/assembler.h" +#include "utils/stack_checks.h" + +namespace art { +namespace arm { + +namespace vixl32 = vixl::aarch32; +using namespace vixl32; // NOLINT(build/namespaces) + +using helpers::DRegisterFrom; +using helpers::HighRegisterFrom; +using helpers::InputDRegisterAt; +using helpers::InputOperandAt; +using helpers::InputRegister; +using helpers::InputRegisterAt; +using helpers::InputSRegisterAt; +using helpers::InputVRegister; +using helpers::InputVRegisterAt; +using helpers::Int32ConstantFrom; +using helpers::Int64ConstantFrom; +using helpers::LocationFrom; +using helpers::LowRegisterFrom; +using helpers::LowSRegisterFrom; +using helpers::OperandFrom; +using helpers::OutputRegister; +using helpers::OutputSRegister; +using helpers::OutputVRegister; +using helpers::RegisterFrom; +using helpers::SRegisterFrom; +using helpers::Uint64ConstantFrom; + +using vixl::EmissionCheckScope; +using vixl::ExactAssemblyScope; +using vixl::CodeBufferCheckScope; + +using RegisterList = vixl32::RegisterList; + +static bool ExpectedPairLayout(Location location) { + // We expected this for both core and fpu register pairs. 
+ return ((location.low() & 1) == 0) && (location.low() + 1 == location.high()); +} +// Use a local definition to prevent copying mistakes. +static constexpr size_t kArmWordSize = static_cast(kArmPointerSize); +static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte; +static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7; + +// Reference load (except object array loads) is using LDR Rt, [Rn, #offset] which can handle +// offset < 4KiB. For offsets >= 4KiB, the load shall be emitted as two or more instructions. +// For the Baker read barrier implementation using link-time generated thunks we need to split +// the offset explicitly. +constexpr uint32_t kReferenceLoadMinFarOffset = 4 * KB; + +// Using a base helps identify when we hit Marking Register check breakpoints. +constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10; + +#ifdef __ +#error "ARM Codegen VIXL macro-assembler macro already defined." +#endif + +// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. +#define __ down_cast(codegen)->GetVIXLAssembler()-> // NOLINT +#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value() + +// Marker that code is yet to be, and must, be implemented. +#define TODO_VIXL32(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented " + +static inline bool CanEmitNarrowLdr(vixl32::Register rt, vixl32::Register rn, uint32_t offset) { + return rt.IsLow() && rn.IsLow() && offset < 32u; +} + +class EmitAdrCode { + public: + EmitAdrCode(ArmVIXLMacroAssembler* assembler, vixl32::Register rd, vixl32::Label* label) + : assembler_(assembler), rd_(rd), label_(label) { + DCHECK(!assembler->AllowMacroInstructions()); // In ExactAssemblyScope. + adr_location_ = assembler->GetCursorOffset(); + assembler->adr(EncodingSize(Wide), rd, label); + } + + ~EmitAdrCode() { + DCHECK(label_->IsBound()); + // The ADR emitted by the assembler does not set the Thumb mode bit we need. 
+ // TODO: Maybe extend VIXL to allow ADR for return address? + uint8_t* raw_adr = assembler_->GetBuffer()->GetOffsetAddress(adr_location_); + // Expecting ADR encoding T3 with `(offset & 1) == 0`. + DCHECK_EQ(raw_adr[1] & 0xfbu, 0xf2u); // Check bits 24-31, except 26. + DCHECK_EQ(raw_adr[0] & 0xffu, 0x0fu); // Check bits 16-23. + DCHECK_EQ(raw_adr[3] & 0x8fu, rd_.GetCode()); // Check bits 8-11 and 15. + DCHECK_EQ(raw_adr[2] & 0x01u, 0x00u); // Check bit 0, i.e. the `offset & 1`. + // Add the Thumb mode bit. + raw_adr[2] |= 0x01u; + } + + private: + ArmVIXLMacroAssembler* const assembler_; + vixl32::Register rd_; + vixl32::Label* const label_; + int32_t adr_location_; +}; + +static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + RegisterSet caller_saves = RegisterSet::Empty(); + caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0))); + // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK() + // that the the kPrimNot result register is the same as the first argument register. + return caller_saves; +} + +// SaveLiveRegisters and RestoreLiveRegisters from SlowPathCodeARM operate on sets of S registers, +// for each live D registers they treat two corresponding S registers as live ones. +// +// Two following functions (SaveContiguousSRegisterList, RestoreContiguousSRegisterList) build +// from a list of contiguous S registers a list of contiguous D registers (processing first/last +// S registers corner cases) and save/restore this new list treating them as D registers. +// - decreasing code size +// - avoiding hazards on Cortex-A57, when a pair of S registers for an actual live D register is +// restored and then used in regular non SlowPath code as D register. +// +// For the following example (v means the S register is live): +// D names: | D0 | D1 | D2 | D4 | ... +// S names: | S0 | S1 | S2 | S3 | S4 | S5 | S6 | S7 | ... +// Live? 
| | v | v | v | v | v | v | | ... +// +// S1 and S6 will be saved/restored independently; D registers list (D1, D2) will be processed +// as D registers. +// +// TODO(VIXL): All this code should be unnecessary once the VIXL AArch32 backend provides helpers +// for lists of floating-point registers. +static size_t SaveContiguousSRegisterList(size_t first, + size_t last, + CodeGenerator* codegen, + size_t stack_offset) { + static_assert(kSRegSizeInBytes == kArmWordSize, "Broken assumption on reg/word sizes."); + static_assert(kDRegSizeInBytes == 2 * kArmWordSize, "Broken assumption on reg/word sizes."); + DCHECK_LE(first, last); + if ((first == last) && (first == 0)) { + __ Vstr(vixl32::SRegister(first), MemOperand(sp, stack_offset)); + return stack_offset + kSRegSizeInBytes; + } + if (first % 2 == 1) { + __ Vstr(vixl32::SRegister(first++), MemOperand(sp, stack_offset)); + stack_offset += kSRegSizeInBytes; + } + + bool save_last = false; + if (last % 2 == 0) { + save_last = true; + --last; + } + + if (first < last) { + vixl32::DRegister d_reg = vixl32::DRegister(first / 2); + DCHECK_EQ((last - first + 1) % 2, 0u); + size_t number_of_d_regs = (last - first + 1) / 2; + + if (number_of_d_regs == 1) { + __ Vstr(d_reg, MemOperand(sp, stack_offset)); + } else if (number_of_d_regs > 1) { + UseScratchRegisterScope temps(down_cast(codegen)->GetVIXLAssembler()); + vixl32::Register base = sp; + if (stack_offset != 0) { + base = temps.Acquire(); + __ Add(base, sp, Operand::From(stack_offset)); + } + __ Vstm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs)); + } + stack_offset += number_of_d_regs * kDRegSizeInBytes; + } + + if (save_last) { + __ Vstr(vixl32::SRegister(last + 1), MemOperand(sp, stack_offset)); + stack_offset += kSRegSizeInBytes; + } + + return stack_offset; +} + +static size_t RestoreContiguousSRegisterList(size_t first, + size_t last, + CodeGenerator* codegen, + size_t stack_offset) { + static_assert(kSRegSizeInBytes == kArmWordSize, "Broken 
assumption on reg/word sizes."); + static_assert(kDRegSizeInBytes == 2 * kArmWordSize, "Broken assumption on reg/word sizes."); + DCHECK_LE(first, last); + if ((first == last) && (first == 0)) { + __ Vldr(vixl32::SRegister(first), MemOperand(sp, stack_offset)); + return stack_offset + kSRegSizeInBytes; + } + if (first % 2 == 1) { + __ Vldr(vixl32::SRegister(first++), MemOperand(sp, stack_offset)); + stack_offset += kSRegSizeInBytes; + } + + bool restore_last = false; + if (last % 2 == 0) { + restore_last = true; + --last; + } + + if (first < last) { + vixl32::DRegister d_reg = vixl32::DRegister(first / 2); + DCHECK_EQ((last - first + 1) % 2, 0u); + size_t number_of_d_regs = (last - first + 1) / 2; + if (number_of_d_regs == 1) { + __ Vldr(d_reg, MemOperand(sp, stack_offset)); + } else if (number_of_d_regs > 1) { + UseScratchRegisterScope temps(down_cast(codegen)->GetVIXLAssembler()); + vixl32::Register base = sp; + if (stack_offset != 0) { + base = temps.Acquire(); + __ Add(base, sp, Operand::From(stack_offset)); + } + __ Vldm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs)); + } + stack_offset += number_of_d_regs * kDRegSizeInBytes; + } + + if (restore_last) { + __ Vldr(vixl32::SRegister(last + 1), MemOperand(sp, stack_offset)); + stack_offset += kSRegSizeInBytes; + } + + return stack_offset; +} + +static LoadOperandType GetLoadOperandType(DataType::Type type) { + switch (type) { + case DataType::Type::kReference: + return kLoadWord; + case DataType::Type::kBool: + case DataType::Type::kUint8: + return kLoadUnsignedByte; + case DataType::Type::kInt8: + return kLoadSignedByte; + case DataType::Type::kUint16: + return kLoadUnsignedHalfword; + case DataType::Type::kInt16: + return kLoadSignedHalfword; + case DataType::Type::kInt32: + return kLoadWord; + case DataType::Type::kInt64: + return kLoadWordPair; + case DataType::Type::kFloat32: + return kLoadSWord; + case DataType::Type::kFloat64: + return kLoadDWord; + default: + LOG(FATAL) << "Unreachable 
type " << type; + UNREACHABLE(); + } +} + +static StoreOperandType GetStoreOperandType(DataType::Type type) { + switch (type) { + case DataType::Type::kReference: + return kStoreWord; + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + return kStoreByte; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + return kStoreHalfword; + case DataType::Type::kInt32: + return kStoreWord; + case DataType::Type::kInt64: + return kStoreWordPair; + case DataType::Type::kFloat32: + return kStoreSWord; + case DataType::Type::kFloat64: + return kStoreDWord; + default: + LOG(FATAL) << "Unreachable type " << type; + UNREACHABLE(); + } +} + +void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); + size_t orig_offset = stack_offset; + + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); + for (uint32_t i : LowToHighBits(core_spills)) { + // If the register holds an object, update the stack mask. 
+ if (locations->RegisterContainsObject(i)) { + locations->SetStackBit(stack_offset / kVRegSize); + } + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_core_stack_offsets_[i] = stack_offset; + stack_offset += kArmWordSize; + } + + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset); + + uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); + orig_offset = stack_offset; + for (uint32_t i : LowToHighBits(fp_spills)) { + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_fpu_stack_offsets_[i] = stack_offset; + stack_offset += kArmWordSize; + } + + stack_offset = orig_offset; + while (fp_spills != 0u) { + uint32_t begin = CTZ(fp_spills); + uint32_t tmp = fp_spills + (1u << begin); + fp_spills &= tmp; // Clear the contiguous range of 1s. + uint32_t end = (tmp == 0u) ? 32u : CTZ(tmp); // CTZ(0) is undefined. + stack_offset = SaveContiguousSRegisterList(begin, end - 1, codegen, stack_offset); + } + DCHECK_LE(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); +} + +void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); + size_t orig_offset = stack_offset; + + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); + for (uint32_t i : LowToHighBits(core_spills)) { + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + stack_offset += kArmWordSize; + } + + // TODO(VIXL): Check the coherency of stack_offset after this with a test. 
+ CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset); + + uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); + while (fp_spills != 0u) { + uint32_t begin = CTZ(fp_spills); + uint32_t tmp = fp_spills + (1u << begin); + fp_spills &= tmp; // Clear the contiguous range of 1s. + uint32_t end = (tmp == 0u) ? 32u : CTZ(tmp); // CTZ(0) is undefined. + stack_offset = RestoreContiguousSRegisterList(begin, end - 1, codegen, stack_offset); + } + DCHECK_LE(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); +} + +class NullCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + explicit NullCheckSlowPathARMVIXL(HNullCheck* instruction) : SlowPathCodeARMVIXL(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + if (instruction_->CanThrowIntoCatchBlock()) { + // Live registers will be restored in the catch block if caught. 
+ SaveLiveRegisters(codegen, instruction_->GetLocations()); + } + arm_codegen->InvokeRuntime(kQuickThrowNullPointer, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "NullCheckSlowPathARMVIXL"; } + + private: + DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARMVIXL); +}; + +class DivZeroCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction) + : SlowPathCodeARMVIXL(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "DivZeroCheckSlowPathARMVIXL"; } + + private: + DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL); +}; + +class SuspendCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + SuspendCheckSlowPathARMVIXL(HSuspendCheck* instruction, HBasicBlock* successor) + : SlowPathCodeARMVIXL(instruction), successor_(successor) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + arm_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + if (successor_ == nullptr) { + __ B(GetReturnLabel()); + } else { + __ B(arm_codegen->GetLabelOf(successor_)); + } + } + + vixl32::Label* GetReturnLabel() { + DCHECK(successor_ == nullptr); + return &return_label_; + } + + HBasicBlock* GetSuccessor() const { + return successor_; + } + + const char* GetDescription() const override { return "SuspendCheckSlowPathARMVIXL"; } + + private: + // If not null, the block to branch to 
after the suspend check. + HBasicBlock* const successor_; + + // If `successor_` is null, the label to branch to after the suspend check. + vixl32::Label return_label_; + + DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARMVIXL); +}; + +class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction) + : SlowPathCodeARMVIXL(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + LocationSummary* locations = instruction_->GetLocations(); + + __ Bind(GetEntryLabel()); + if (instruction_->CanThrowIntoCatchBlock()) { + // Live registers will be restored in the catch block if caught. + SaveLiveRegisters(codegen, instruction_->GetLocations()); + } + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. + InvokeRuntimeCallingConventionARMVIXL calling_convention; + codegen->EmitParallelMoves( + locations->InAt(0), + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kInt32, + locations->InAt(1), + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kInt32); + QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() + ? 
kQuickThrowStringBounds + : kQuickThrowArrayBounds; + arm_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "BoundsCheckSlowPathARMVIXL"; } + + private: + DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL); +}; + +class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at) + : SlowPathCodeARMVIXL(at), cls_(cls) { + DCHECK(at->IsLoadClass() || at->IsClinitCheck()); + DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); + } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + Location out = locations->Out(); + const uint32_t dex_pc = instruction_->GetDexPc(); + bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath(); + bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck(); + + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConventionARMVIXL calling_convention; + if (must_resolve_type) { + DCHECK(IsSameDexFile(cls_->GetDexFile(), arm_codegen->GetGraph()->GetDexFile())); + dex::TypeIndex type_index = cls_->GetTypeIndex(); + __ Mov(calling_convention.GetRegisterAt(0), type_index.index_); + arm_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this); + CheckEntrypointTypes(); + // If we also must_do_clinit, the resolved type is now in the correct register. + } else { + DCHECK(must_do_clinit); + Location source = instruction_->IsLoadClass() ? 
out : locations->InAt(0); + arm_codegen->Move32(LocationFrom(calling_convention.GetRegisterAt(0)), source); + } + if (must_do_clinit) { + arm_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this); + CheckEntrypointTypes(); + } + + // Move the class to the desired location. + if (out.IsValid()) { + DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); + arm_codegen->Move32(locations->Out(), LocationFrom(r0)); + } + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "LoadClassSlowPathARMVIXL"; } + + private: + // The class this slow path will load. + HLoadClass* const cls_; + + DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARMVIXL); +}; + +class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + explicit LoadStringSlowPathARMVIXL(HLoadString* instruction) + : SlowPathCodeARMVIXL(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + DCHECK(instruction_->IsLoadString()); + DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry); + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex(); + + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConventionARMVIXL calling_convention; + __ Mov(calling_convention.GetRegisterAt(0), string_index.index_); + arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + + arm_codegen->Move32(locations->Out(), LocationFrom(r0)); + RestoreLiveRegisters(codegen, locations); + + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "LoadStringSlowPathARMVIXL"; } + + 
private: + DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARMVIXL); +}; + +class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal) + : SlowPathCodeARMVIXL(instruction), is_fatal_(is_fatal) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(instruction_->IsCheckCast() + || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + + if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) { + SaveLiveRegisters(codegen, locations); + } + + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. + InvokeRuntimeCallingConventionARMVIXL calling_convention; + + codegen->EmitParallelMoves(locations->InAt(0), + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + locations->InAt(1), + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kReference); + if (instruction_->IsInstanceOf()) { + arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes(); + arm_codegen->Move32(locations->Out(), LocationFrom(r0)); + } else { + DCHECK(instruction_->IsCheckCast()); + arm_codegen->InvokeRuntime(kQuickCheckInstanceOf, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes(); + } + + if (!is_fatal_) { + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + } + + const char* GetDescription() const override { return "TypeCheckSlowPathARMVIXL"; } + + bool IsFatal() const override { return is_fatal_; } + + private: + const bool is_fatal_; + + DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARMVIXL); +}; + +class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + explicit 
DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction) + : SlowPathCodeARMVIXL(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + LocationSummary* locations = instruction_->GetLocations(); + SaveLiveRegisters(codegen, locations); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + __ Mov(calling_convention.GetRegisterAt(0), + static_cast(instruction_->AsDeoptimize()->GetDeoptimizationKind())); + + arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + } + + const char* GetDescription() const override { return "DeoptimizationSlowPathARMVIXL"; } + + private: + DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL); +}; + +class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConventionARMVIXL calling_convention; + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); + parallel_move.AddMove( + locations->InAt(0), + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + nullptr); + parallel_move.AddMove( + locations->InAt(1), + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kInt32, + nullptr); + parallel_move.AddMove( + locations->InAt(2), + LocationFrom(calling_convention.GetRegisterAt(2)), + DataType::Type::kReference, + nullptr); + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + arm_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + RestoreLiveRegisters(codegen, 
locations); + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "ArraySetSlowPathARMVIXL"; } + + private: + DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL); +}; + +// Slow path generating a read barrier for a heap reference. +class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + ReadBarrierForHeapReferenceSlowPathARMVIXL(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) + : SlowPathCodeARMVIXL(instruction), + out_(out), + ref_(ref), + obj_(obj), + offset_(offset), + index_(index) { + DCHECK(kEmitCompilerReadBarrier); + // If `obj` is equal to `out` or `ref`, it means the initial object + // has been overwritten by (or after) the heap object reference load + // to be instrumented, e.g.: + // + // __ LoadFromOffset(kLoadWord, out, out, offset); + // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset); + // + // In that case, we have lost the information about the original + // object, and the emitted read barrier cannot work properly. 
+ DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out; + DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; + } + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + LocationSummary* locations = instruction_->GetLocations(); + vixl32::Register reg_out = RegisterFrom(out_); + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.GetCode())); + DCHECK(instruction_->IsInstanceFieldGet() || + instruction_->IsStaticFieldGet() || + instruction_->IsArrayGet() || + instruction_->IsInstanceOf() || + instruction_->IsCheckCast() || + (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified())) + << "Unexpected instruction in read barrier for heap reference slow path: " + << instruction_->DebugName(); + // The read barrier instrumentation of object ArrayGet + // instructions does not support the HIntermediateAddress + // instruction. + DCHECK(!(instruction_->IsArrayGet() && + instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress())); + + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + // We may have to change the index's value, but as `index_` is a + // constant member (like other "inputs" of this slow path), + // introduce a copy of it, `index`. + Location index = index_; + if (index_.IsValid()) { + // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics. + if (instruction_->IsArrayGet()) { + // Compute the actual memory offset and store it in `index`. 
+ vixl32::Register index_reg = RegisterFrom(index_); + DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg.GetCode())); + if (codegen->IsCoreCalleeSaveRegister(index_reg.GetCode())) { + // We are about to change the value of `index_reg` (see the + // calls to art::arm::ArmVIXLMacroAssembler::Lsl and + // art::arm::ArmVIXLMacroAssembler::Add below), but it has + // not been saved by the previous call to + // art::SlowPathCode::SaveLiveRegisters, as it is a + // callee-save register -- + // art::SlowPathCode::SaveLiveRegisters does not consider + // callee-save registers, as it has been designed with the + // assumption that callee-save registers are supposed to be + // handled by the called function. So, as a callee-save + // register, `index_reg` _would_ eventually be saved onto + // the stack, but it would be too late: we would have + // changed its value earlier. Therefore, we manually save + // it here into another freely available register, + // `free_reg`, chosen of course among the caller-save + // registers (as a callee-save `free_reg` register would + // exhibit the same problem). + // + // Note we could have requested a temporary register from + // the register allocator instead; but we prefer not to, as + // this is a slow path, and we know we can find a + // caller-save register that is available. + vixl32::Register free_reg = FindAvailableCallerSaveRegister(codegen); + __ Mov(free_reg, index_reg); + index_reg = free_reg; + index = LocationFrom(index_reg); + } else { + // The initial register stored in `index_` has already been + // saved in the call to art::SlowPathCode::SaveLiveRegisters + // (as it is not a callee-save register), so we can freely + // use it. + } + // Shifting the index value contained in `index_reg` by the scale + // factor (2) cannot overflow in practice, as the runtime is + // unable to allocate object arrays with a size larger than + // 2^26 - 1 (that is, 2^28 - 4 bytes). 
+ __ Lsl(index_reg, index_reg, TIMES_4); + static_assert( + sizeof(mirror::HeapReference) == sizeof(int32_t), + "art::mirror::HeapReference and int32_t have different sizes."); + __ Add(index_reg, index_reg, offset_); + } else { + // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile + // intrinsics, `index_` is not shifted by a scale factor of 2 + // (as in the case of ArrayGet), as it is actually an offset + // to an object field within an object. + DCHECK(instruction_->IsInvoke()) << instruction_->DebugName(); + DCHECK(instruction_->GetLocations()->Intrinsified()); + DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) || + (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)) + << instruction_->AsInvoke()->GetIntrinsic(); + DCHECK_EQ(offset_, 0U); + DCHECK(index_.IsRegisterPair()); + // UnsafeGet's offset location is a register pair, the low + // part contains the correct offset. + index = index_.ToLow(); + } + } + + // We're moving two or three locations to locations that could + // overlap, so we need a parallel move resolver. 
+ InvokeRuntimeCallingConventionARMVIXL calling_convention; + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); + parallel_move.AddMove(ref_, + LocationFrom(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + nullptr); + parallel_move.AddMove(obj_, + LocationFrom(calling_convention.GetRegisterAt(1)), + DataType::Type::kReference, + nullptr); + if (index.IsValid()) { + parallel_move.AddMove(index, + LocationFrom(calling_convention.GetRegisterAt(2)), + DataType::Type::kInt32, + nullptr); + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + } else { + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + __ Mov(calling_convention.GetRegisterAt(2), offset_); + } + arm_codegen->InvokeRuntime(kQuickReadBarrierSlow, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes< + kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>(); + arm_codegen->Move32(out_, LocationFrom(r0)); + + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { + return "ReadBarrierForHeapReferenceSlowPathARMVIXL"; + } + + private: + vixl32::Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) { + uint32_t ref = RegisterFrom(ref_).GetCode(); + uint32_t obj = RegisterFrom(obj_).GetCode(); + for (uint32_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) { + if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) { + return vixl32::Register(i); + } + } + // We shall never fail to find a free caller-save register, as + // there are more than two core caller-save registers on ARM + // (meaning it is possible to find one which is different from + // `ref` and `obj`). 
+ DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u); + LOG(FATAL) << "Could not find a free caller-save register"; + UNREACHABLE(); + } + + const Location out_; + const Location ref_; + const Location obj_; + const uint32_t offset_; + // An additional location containing an index to an array. + // Only used for HArrayGet and the UnsafeGetObject & + // UnsafeGetObjectVolatile intrinsics. + const Location index_; + + DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathARMVIXL); +}; + +// Slow path generating a read barrier for a GC root. +class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL { + public: + ReadBarrierForRootSlowPathARMVIXL(HInstruction* instruction, Location out, Location root) + : SlowPathCodeARMVIXL(instruction), out_(out), root_(root) { + DCHECK(kEmitCompilerReadBarrier); + } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + vixl32::Register reg_out = RegisterFrom(out_); + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.GetCode())); + DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()) + << "Unexpected instruction in read barrier for GC root slow path: " + << instruction_->DebugName(); + + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConventionARMVIXL calling_convention; + CodeGeneratorARMVIXL* arm_codegen = down_cast(codegen); + arm_codegen->Move32(LocationFrom(calling_convention.GetRegisterAt(0)), root_); + arm_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes*>(); + arm_codegen->Move32(out_, LocationFrom(r0)); + + RestoreLiveRegisters(codegen, locations); + __ B(GetExitLabel()); + } + + const char* GetDescription() const override { return "ReadBarrierForRootSlowPathARMVIXL"; } + + private: + const Location out_; + const Location root_; + + 
DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathARMVIXL); +}; + +inline vixl32::Condition ARMCondition(IfCondition cond) { + switch (cond) { + case kCondEQ: return eq; + case kCondNE: return ne; + case kCondLT: return lt; + case kCondLE: return le; + case kCondGT: return gt; + case kCondGE: return ge; + case kCondB: return lo; + case kCondBE: return ls; + case kCondA: return hi; + case kCondAE: return hs; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +// Maps signed condition to unsigned condition. +inline vixl32::Condition ARMUnsignedCondition(IfCondition cond) { + switch (cond) { + case kCondEQ: return eq; + case kCondNE: return ne; + // Signed to unsigned. + case kCondLT: return lo; + case kCondLE: return ls; + case kCondGT: return hi; + case kCondGE: return hs; + // Unsigned remain unchanged. + case kCondB: return lo; + case kCondBE: return ls; + case kCondA: return hi; + case kCondAE: return hs; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +inline vixl32::Condition ARMFPCondition(IfCondition cond, bool gt_bias) { + // The ARM condition codes can express all the necessary branches, see the + // "Meaning (floating-point)" column in the table A8-1 of the ARMv7 reference manual. + // There is no dex instruction or HIR that would need the missing conditions + // "equal or unordered" or "not equal". + switch (cond) { + case kCondEQ: return eq; + case kCondNE: return ne /* unordered */; + case kCondLT: return gt_bias ? cc : lt /* unordered */; + case kCondLE: return gt_bias ? ls : le /* unordered */; + case kCondGT: return gt_bias ? hi /* unordered */ : gt; + case kCondGE: return gt_bias ? 
cs /* unordered */ : ge; + default: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } +} + +inline ShiftType ShiftFromOpKind(HDataProcWithShifterOp::OpKind op_kind) { + switch (op_kind) { + case HDataProcWithShifterOp::kASR: return ShiftType::ASR; + case HDataProcWithShifterOp::kLSL: return ShiftType::LSL; + case HDataProcWithShifterOp::kLSR: return ShiftType::LSR; + default: + LOG(FATAL) << "Unexpected op kind " << op_kind; + UNREACHABLE(); + } +} + +void CodeGeneratorARMVIXL::DumpCoreRegister(std::ostream& stream, int reg) const { + stream << vixl32::Register(reg); +} + +void CodeGeneratorARMVIXL::DumpFloatingPointRegister(std::ostream& stream, int reg) const { + stream << vixl32::SRegister(reg); +} + +const ArmInstructionSetFeatures& CodeGeneratorARMVIXL::GetInstructionSetFeatures() const { + return *GetCompilerOptions().GetInstructionSetFeatures()->AsArmInstructionSetFeatures(); +} + +static uint32_t ComputeSRegisterListMask(const SRegisterList& regs) { + uint32_t mask = 0; + for (uint32_t i = regs.GetFirstSRegister().GetCode(); + i <= regs.GetLastSRegister().GetCode(); + ++i) { + mask |= (1 << i); + } + return mask; +} + +// Saves the register in the stack. Returns the size taken on stack. +size_t CodeGeneratorARMVIXL::SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED, + uint32_t reg_id ATTRIBUTE_UNUSED) { + TODO_VIXL32(FATAL); + UNREACHABLE(); +} + +// Restores the register from the stack. Returns the size taken on stack. 
+size_t CodeGeneratorARMVIXL::RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED, + uint32_t reg_id ATTRIBUTE_UNUSED) { + TODO_VIXL32(FATAL); + UNREACHABLE(); +} + +size_t CodeGeneratorARMVIXL::SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED, + uint32_t reg_id ATTRIBUTE_UNUSED) { + TODO_VIXL32(FATAL); + UNREACHABLE(); +} + +size_t CodeGeneratorARMVIXL::RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED, + uint32_t reg_id ATTRIBUTE_UNUSED) { + TODO_VIXL32(FATAL); + UNREACHABLE(); +} + +static void GenerateDataProcInstruction(HInstruction::InstructionKind kind, + vixl32::Register out, + vixl32::Register first, + const Operand& second, + CodeGeneratorARMVIXL* codegen) { + if (second.IsImmediate() && second.GetImmediate() == 0) { + const Operand in = kind == HInstruction::kAnd + ? Operand(0) + : Operand(first); + + __ Mov(out, in); + } else { + switch (kind) { + case HInstruction::kAdd: + __ Add(out, first, second); + break; + case HInstruction::kAnd: + __ And(out, first, second); + break; + case HInstruction::kOr: + __ Orr(out, first, second); + break; + case HInstruction::kSub: + __ Sub(out, first, second); + break; + case HInstruction::kXor: + __ Eor(out, first, second); + break; + default: + LOG(FATAL) << "Unexpected instruction kind: " << kind; + UNREACHABLE(); + } + } +} + +static void GenerateDataProc(HInstruction::InstructionKind kind, + const Location& out, + const Location& first, + const Operand& second_lo, + const Operand& second_hi, + CodeGeneratorARMVIXL* codegen) { + const vixl32::Register first_hi = HighRegisterFrom(first); + const vixl32::Register first_lo = LowRegisterFrom(first); + const vixl32::Register out_hi = HighRegisterFrom(out); + const vixl32::Register out_lo = LowRegisterFrom(out); + + if (kind == HInstruction::kAdd) { + __ Adds(out_lo, first_lo, second_lo); + __ Adc(out_hi, first_hi, second_hi); + } else if (kind == HInstruction::kSub) { + __ Subs(out_lo, first_lo, second_lo); + __ Sbc(out_hi, first_hi, 
second_hi); + } else { + GenerateDataProcInstruction(kind, out_lo, first_lo, second_lo, codegen); + GenerateDataProcInstruction(kind, out_hi, first_hi, second_hi, codegen); + } +} + +static Operand GetShifterOperand(vixl32::Register rm, ShiftType shift, uint32_t shift_imm) { + return shift_imm == 0 ? Operand(rm) : Operand(rm, shift, shift_imm); +} + +static void GenerateLongDataProc(HDataProcWithShifterOp* instruction, + CodeGeneratorARMVIXL* codegen) { + DCHECK_EQ(instruction->GetType(), DataType::Type::kInt64); + DCHECK(HDataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())); + + const LocationSummary* const locations = instruction->GetLocations(); + const uint32_t shift_value = instruction->GetShiftAmount(); + const HInstruction::InstructionKind kind = instruction->GetInstrKind(); + const Location first = locations->InAt(0); + const Location second = locations->InAt(1); + const Location out = locations->Out(); + const vixl32::Register first_hi = HighRegisterFrom(first); + const vixl32::Register first_lo = LowRegisterFrom(first); + const vixl32::Register out_hi = HighRegisterFrom(out); + const vixl32::Register out_lo = LowRegisterFrom(out); + const vixl32::Register second_hi = HighRegisterFrom(second); + const vixl32::Register second_lo = LowRegisterFrom(second); + const ShiftType shift = ShiftFromOpKind(instruction->GetOpKind()); + + if (shift_value >= 32) { + if (shift == ShiftType::LSL) { + GenerateDataProcInstruction(kind, + out_hi, + first_hi, + Operand(second_lo, ShiftType::LSL, shift_value - 32), + codegen); + GenerateDataProcInstruction(kind, out_lo, first_lo, 0, codegen); + } else if (shift == ShiftType::ASR) { + GenerateDataProc(kind, + out, + first, + GetShifterOperand(second_hi, ShiftType::ASR, shift_value - 32), + Operand(second_hi, ShiftType::ASR, 31), + codegen); + } else { + DCHECK_EQ(shift, ShiftType::LSR); + GenerateDataProc(kind, + out, + first, + GetShifterOperand(second_hi, ShiftType::LSR, shift_value - 32), + 0, + codegen); + } + } else 
{ + DCHECK_GT(shift_value, 1U); + DCHECK_LT(shift_value, 32U); + + UseScratchRegisterScope temps(codegen->GetVIXLAssembler()); + + if (shift == ShiftType::LSL) { + // We are not doing this for HInstruction::kAdd because the output will require + // Location::kOutputOverlap; not applicable to other cases. + if (kind == HInstruction::kOr || kind == HInstruction::kXor) { + GenerateDataProcInstruction(kind, + out_hi, + first_hi, + Operand(second_hi, ShiftType::LSL, shift_value), + codegen); + GenerateDataProcInstruction(kind, + out_hi, + out_hi, + Operand(second_lo, ShiftType::LSR, 32 - shift_value), + codegen); + GenerateDataProcInstruction(kind, + out_lo, + first_lo, + Operand(second_lo, ShiftType::LSL, shift_value), + codegen); + } else { + const vixl32::Register temp = temps.Acquire(); + + __ Lsl(temp, second_hi, shift_value); + __ Orr(temp, temp, Operand(second_lo, ShiftType::LSR, 32 - shift_value)); + GenerateDataProc(kind, + out, + first, + Operand(second_lo, ShiftType::LSL, shift_value), + temp, + codegen); + } + } else { + DCHECK(shift == ShiftType::ASR || shift == ShiftType::LSR); + + // We are not doing this for HInstruction::kAdd because the output will require + // Location::kOutputOverlap; not applicable to other cases. 
+ if (kind == HInstruction::kOr || kind == HInstruction::kXor) { + GenerateDataProcInstruction(kind, + out_lo, + first_lo, + Operand(second_lo, ShiftType::LSR, shift_value), + codegen); + GenerateDataProcInstruction(kind, + out_lo, + out_lo, + Operand(second_hi, ShiftType::LSL, 32 - shift_value), + codegen); + GenerateDataProcInstruction(kind, + out_hi, + first_hi, + Operand(second_hi, shift, shift_value), + codegen); + } else { + const vixl32::Register temp = temps.Acquire(); + + __ Lsr(temp, second_lo, shift_value); + __ Orr(temp, temp, Operand(second_hi, ShiftType::LSL, 32 - shift_value)); + GenerateDataProc(kind, + out, + first, + temp, + Operand(second_hi, shift, shift_value), + codegen); + } + } + } +} + +static void GenerateVcmp(HInstruction* instruction, CodeGeneratorARMVIXL* codegen) { + const Location rhs_loc = instruction->GetLocations()->InAt(1); + if (rhs_loc.IsConstant()) { + // 0.0 is the only immediate that can be encoded directly in + // a VCMP instruction. + // + // Both the JLS (section 15.20.1) and the JVMS (section 6.5) + // specify that in a floating-point comparison, positive zero + // and negative zero are considered equal, so we can use the + // literal 0.0 for both cases here. + // + // Note however that some methods (Float.equal, Float.compare, + // Float.compareTo, Double.equal, Double.compare, + // Double.compareTo, Math.max, Math.min, StrictMath.max, + // StrictMath.min) consider 0.0 to be (strictly) greater than + // -0.0. So if we ever translate calls to these methods into a + // HCompare instruction, we must handle the -0.0 case with + // care here. 
+ DCHECK(rhs_loc.GetConstant()->IsArithmeticZero()); + + const DataType::Type type = instruction->InputAt(0)->GetType(); + + if (type == DataType::Type::kFloat32) { + __ Vcmp(F32, InputSRegisterAt(instruction, 0), 0.0); + } else { + DCHECK_EQ(type, DataType::Type::kFloat64); + __ Vcmp(F64, InputDRegisterAt(instruction, 0), 0.0); + } + } else { + __ Vcmp(InputVRegisterAt(instruction, 0), InputVRegisterAt(instruction, 1)); + } +} + +static int64_t AdjustConstantForCondition(int64_t value, + IfCondition* condition, + IfCondition* opposite) { + if (value == 1) { + if (*condition == kCondB) { + value = 0; + *condition = kCondEQ; + *opposite = kCondNE; + } else if (*condition == kCondAE) { + value = 0; + *condition = kCondNE; + *opposite = kCondEQ; + } + } else if (value == -1) { + if (*condition == kCondGT) { + value = 0; + *condition = kCondGE; + *opposite = kCondLT; + } else if (*condition == kCondLE) { + value = 0; + *condition = kCondLT; + *opposite = kCondGE; + } + } + + return value; +} + +static std::pair GenerateLongTestConstant( + HCondition* condition, + bool invert, + CodeGeneratorARMVIXL* codegen) { + DCHECK_EQ(condition->GetLeft()->GetType(), DataType::Type::kInt64); + + const LocationSummary* const locations = condition->GetLocations(); + IfCondition cond = condition->GetCondition(); + IfCondition opposite = condition->GetOppositeCondition(); + + if (invert) { + std::swap(cond, opposite); + } + + std::pair ret(eq, ne); + const Location left = locations->InAt(0); + const Location right = locations->InAt(1); + + DCHECK(right.IsConstant()); + + const vixl32::Register left_high = HighRegisterFrom(left); + const vixl32::Register left_low = LowRegisterFrom(left); + int64_t value = AdjustConstantForCondition(Int64ConstantFrom(right), &cond, &opposite); + UseScratchRegisterScope temps(codegen->GetVIXLAssembler()); + + // Comparisons against 0 are common enough to deserve special attention. 
+ if (value == 0) { + switch (cond) { + case kCondNE: + // x > 0 iff x != 0 when the comparison is unsigned. + case kCondA: + ret = std::make_pair(ne, eq); + FALLTHROUGH_INTENDED; + case kCondEQ: + // x <= 0 iff x == 0 when the comparison is unsigned. + case kCondBE: + __ Orrs(temps.Acquire(), left_low, left_high); + return ret; + case kCondLT: + case kCondGE: + __ Cmp(left_high, 0); + return std::make_pair(ARMCondition(cond), ARMCondition(opposite)); + // Trivially true or false. + case kCondB: + ret = std::make_pair(ne, eq); + FALLTHROUGH_INTENDED; + case kCondAE: + __ Cmp(left_low, left_low); + return ret; + default: + break; + } + } + + switch (cond) { + case kCondEQ: + case kCondNE: + case kCondB: + case kCondBE: + case kCondA: + case kCondAE: { + const uint32_t value_low = Low32Bits(value); + Operand operand_low(value_low); + + __ Cmp(left_high, High32Bits(value)); + + // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8, + // we must ensure that the operands corresponding to the least significant + // halves of the inputs fit into a 16-bit CMP encoding. + if (!left_low.IsLow() || !IsUint<8>(value_low)) { + operand_low = Operand(temps.Acquire()); + __ Mov(LeaveFlags, operand_low.GetBaseRegister(), value_low); + } + + // We use the scope because of the IT block that follows. + ExactAssemblyScope guard(codegen->GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(eq); + __ cmp(eq, left_low, operand_low); + ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite)); + break; + } + case kCondLE: + case kCondGT: + // Trivially true or false. + if (value == std::numeric_limits::max()) { + __ Cmp(left_low, left_low); + ret = cond == kCondLE ? 
std::make_pair(eq, ne) : std::make_pair(ne, eq); + break; + } + + if (cond == kCondLE) { + DCHECK_EQ(opposite, kCondGT); + cond = kCondLT; + opposite = kCondGE; + } else { + DCHECK_EQ(cond, kCondGT); + DCHECK_EQ(opposite, kCondLE); + cond = kCondGE; + opposite = kCondLT; + } + + value++; + FALLTHROUGH_INTENDED; + case kCondGE: + case kCondLT: { + __ Cmp(left_low, Low32Bits(value)); + __ Sbcs(temps.Acquire(), left_high, High32Bits(value)); + ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite)); + break; + } + default: + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); + } + + return ret; +} + +static std::pair GenerateLongTest( + HCondition* condition, + bool invert, + CodeGeneratorARMVIXL* codegen) { + DCHECK_EQ(condition->GetLeft()->GetType(), DataType::Type::kInt64); + + const LocationSummary* const locations = condition->GetLocations(); + IfCondition cond = condition->GetCondition(); + IfCondition opposite = condition->GetOppositeCondition(); + + if (invert) { + std::swap(cond, opposite); + } + + std::pair ret(eq, ne); + Location left = locations->InAt(0); + Location right = locations->InAt(1); + + DCHECK(right.IsRegisterPair()); + + switch (cond) { + case kCondEQ: + case kCondNE: + case kCondB: + case kCondBE: + case kCondA: + case kCondAE: { + __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); + + // We use the scope because of the IT block that follows. 
+ ExactAssemblyScope guard(codegen->GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(eq); + __ cmp(eq, LowRegisterFrom(left), LowRegisterFrom(right)); + ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite)); + break; + } + case kCondLE: + case kCondGT: + if (cond == kCondLE) { + DCHECK_EQ(opposite, kCondGT); + cond = kCondGE; + opposite = kCondLT; + } else { + DCHECK_EQ(cond, kCondGT); + DCHECK_EQ(opposite, kCondLE); + cond = kCondLT; + opposite = kCondGE; + } + + std::swap(left, right); + FALLTHROUGH_INTENDED; + case kCondGE: + case kCondLT: { + UseScratchRegisterScope temps(codegen->GetVIXLAssembler()); + + __ Cmp(LowRegisterFrom(left), LowRegisterFrom(right)); + __ Sbcs(temps.Acquire(), HighRegisterFrom(left), HighRegisterFrom(right)); + ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite)); + break; + } + default: + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); + } + + return ret; +} + +static std::pair GenerateTest(HCondition* condition, + bool invert, + CodeGeneratorARMVIXL* codegen) { + const DataType::Type type = condition->GetLeft()->GetType(); + IfCondition cond = condition->GetCondition(); + IfCondition opposite = condition->GetOppositeCondition(); + std::pair ret(eq, ne); + + if (invert) { + std::swap(cond, opposite); + } + + if (type == DataType::Type::kInt64) { + ret = condition->GetLocations()->InAt(1).IsConstant() + ? 
GenerateLongTestConstant(condition, invert, codegen) + : GenerateLongTest(condition, invert, codegen); + } else if (DataType::IsFloatingPointType(type)) { + GenerateVcmp(condition, codegen); + __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); + ret = std::make_pair(ARMFPCondition(cond, condition->IsGtBias()), + ARMFPCondition(opposite, condition->IsGtBias())); + } else { + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; + __ Cmp(InputRegisterAt(condition, 0), InputOperandAt(condition, 1)); + ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite)); + } + + return ret; +} + +static void GenerateConditionGeneric(HCondition* cond, CodeGeneratorARMVIXL* codegen) { + const vixl32::Register out = OutputRegister(cond); + const auto condition = GenerateTest(cond, false, codegen); + + __ Mov(LeaveFlags, out, 0); + + if (out.IsLow()) { + // We use the scope because of the IT block that follows. + ExactAssemblyScope guard(codegen->GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(condition.first); + __ mov(condition.first, out, 1); + } else { + vixl32::Label done_label; + vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label); + + __ B(condition.second, final_label, /* is_far_target= */ false); + __ Mov(out, 1); + + if (done_label.IsReferenced()) { + __ Bind(&done_label); + } + } +} + +static void GenerateEqualLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) { + DCHECK_EQ(cond->GetLeft()->GetType(), DataType::Type::kInt64); + + const LocationSummary* const locations = cond->GetLocations(); + IfCondition condition = cond->GetCondition(); + const vixl32::Register out = OutputRegister(cond); + const Location left = locations->InAt(0); + const Location right = locations->InAt(1); + vixl32::Register left_high = HighRegisterFrom(left); + vixl32::Register left_low = LowRegisterFrom(left); + vixl32::Register temp; + UseScratchRegisterScope 
temps(codegen->GetVIXLAssembler()); + + if (right.IsConstant()) { + IfCondition opposite = cond->GetOppositeCondition(); + const int64_t value = AdjustConstantForCondition(Int64ConstantFrom(right), + &condition, + &opposite); + Operand right_high = High32Bits(value); + Operand right_low = Low32Bits(value); + + // The output uses Location::kNoOutputOverlap. + if (out.Is(left_high)) { + std::swap(left_low, left_high); + std::swap(right_low, right_high); + } + + __ Sub(out, left_low, right_low); + temp = temps.Acquire(); + __ Sub(temp, left_high, right_high); + } else { + DCHECK(right.IsRegisterPair()); + temp = temps.Acquire(); + __ Sub(temp, left_high, HighRegisterFrom(right)); + __ Sub(out, left_low, LowRegisterFrom(right)); + } + + // Need to check after calling AdjustConstantForCondition(). + DCHECK(condition == kCondEQ || condition == kCondNE) << condition; + + if (condition == kCondNE && out.IsLow()) { + __ Orrs(out, out, temp); + + // We use the scope because of the IT block that follows. + ExactAssemblyScope guard(codegen->GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(ne); + __ mov(ne, out, 1); + } else { + __ Orr(out, out, temp); + codegen->GenerateConditionWithZero(condition, out, out, temp); + } +} + +static void GenerateConditionLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) { + DCHECK_EQ(cond->GetLeft()->GetType(), DataType::Type::kInt64); + + const LocationSummary* const locations = cond->GetLocations(); + IfCondition condition = cond->GetCondition(); + const vixl32::Register out = OutputRegister(cond); + const Location left = locations->InAt(0); + const Location right = locations->InAt(1); + + if (right.IsConstant()) { + IfCondition opposite = cond->GetOppositeCondition(); + + // Comparisons against 0 are common enough to deserve special attention. 
+ if (AdjustConstantForCondition(Int64ConstantFrom(right), &condition, &opposite) == 0) { + switch (condition) { + case kCondNE: + case kCondA: + if (out.IsLow()) { + // We only care if both input registers are 0 or not. + __ Orrs(out, LowRegisterFrom(left), HighRegisterFrom(left)); + + // We use the scope because of the IT block that follows. + ExactAssemblyScope guard(codegen->GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(ne); + __ mov(ne, out, 1); + return; + } + + FALLTHROUGH_INTENDED; + case kCondEQ: + case kCondBE: + // We only care if both input registers are 0 or not. + __ Orr(out, LowRegisterFrom(left), HighRegisterFrom(left)); + codegen->GenerateConditionWithZero(condition, out, out); + return; + case kCondLT: + case kCondGE: + // We only care about the sign bit. + FALLTHROUGH_INTENDED; + case kCondAE: + case kCondB: + codegen->GenerateConditionWithZero(condition, out, HighRegisterFrom(left)); + return; + case kCondLE: + case kCondGT: + default: + break; + } + } + } + + // If `out` is a low register, then the GenerateConditionGeneric() + // function generates a shorter code sequence that is still branchless. 
+ if ((condition == kCondEQ || condition == kCondNE) && !out.IsLow()) { + GenerateEqualLong(cond, codegen); + return; + } + + GenerateConditionGeneric(cond, codegen); +} + +static void GenerateConditionIntegralOrNonPrimitive(HCondition* cond, + CodeGeneratorARMVIXL* codegen) { + const DataType::Type type = cond->GetLeft()->GetType(); + + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; + + if (type == DataType::Type::kInt64) { + GenerateConditionLong(cond, codegen); + return; + } + + IfCondition condition = cond->GetCondition(); + vixl32::Register in = InputRegisterAt(cond, 0); + const vixl32::Register out = OutputRegister(cond); + const Location right = cond->GetLocations()->InAt(1); + int64_t value; + + if (right.IsConstant()) { + IfCondition opposite = cond->GetOppositeCondition(); + + value = AdjustConstantForCondition(Int64ConstantFrom(right), &condition, &opposite); + + // Comparisons against 0 are common enough to deserve special attention. + if (value == 0) { + switch (condition) { + case kCondNE: + case kCondA: + if (out.IsLow() && out.Is(in)) { + __ Cmp(out, 0); + + // We use the scope because of the IT block that follows. + ExactAssemblyScope guard(codegen->GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(ne); + __ mov(ne, out, 1); + return; + } + + FALLTHROUGH_INTENDED; + case kCondEQ: + case kCondBE: + case kCondLT: + case kCondGE: + case kCondAE: + case kCondB: + codegen->GenerateConditionWithZero(condition, out, in); + return; + case kCondLE: + case kCondGT: + default: + break; + } + } + } + + if (condition == kCondEQ || condition == kCondNE) { + Operand operand(0); + + if (right.IsConstant()) { + operand = Operand::From(value); + } else if (out.Is(RegisterFrom(right))) { + // Avoid 32-bit instructions if possible. 
+ operand = InputOperandAt(cond, 0); + in = RegisterFrom(right); + } else { + operand = InputOperandAt(cond, 1); + } + + if (condition == kCondNE && out.IsLow()) { + __ Subs(out, in, operand); + + // We use the scope because of the IT block that follows. + ExactAssemblyScope guard(codegen->GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(ne); + __ mov(ne, out, 1); + } else { + __ Sub(out, in, operand); + codegen->GenerateConditionWithZero(condition, out, out); + } + + return; + } + + GenerateConditionGeneric(cond, codegen); +} + +static bool CanEncodeConstantAs8BitImmediate(HConstant* constant) { + const DataType::Type type = constant->GetType(); + bool ret = false; + + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; + + if (type == DataType::Type::kInt64) { + const uint64_t value = Uint64ConstantFrom(constant); + + ret = IsUint<8>(Low32Bits(value)) && IsUint<8>(High32Bits(value)); + } else { + ret = IsUint<8>(Int32ConstantFrom(constant)); + } + + return ret; +} + +static Location Arm8BitEncodableConstantOrRegister(HInstruction* constant) { + DCHECK(!DataType::IsFloatingPointType(constant->GetType())); + + if (constant->IsConstant() && CanEncodeConstantAs8BitImmediate(constant->AsConstant())) { + return Location::ConstantLocation(constant->AsConstant()); + } + + return Location::RequiresRegister(); +} + +static bool CanGenerateConditionalMove(const Location& out, const Location& src) { + // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8, + // we check that we are not dealing with floating-point output (there is no + // 16-bit VMOV encoding). + if (!out.IsRegister() && !out.IsRegisterPair()) { + return false; + } + + // For constants, we also check that the output is in one or two low registers, + // and that the constants fit in an 8-bit unsigned integer, so that a 16-bit + // MOV encoding can be used. 
+ if (src.IsConstant()) { + if (!CanEncodeConstantAs8BitImmediate(src.GetConstant())) { + return false; + } + + if (out.IsRegister()) { + if (!RegisterFrom(out).IsLow()) { + return false; + } + } else { + DCHECK(out.IsRegisterPair()); + + if (!HighRegisterFrom(out).IsLow()) { + return false; + } + } + } + + return true; +} + +#undef __ + +vixl32::Label* CodeGeneratorARMVIXL::GetFinalLabel(HInstruction* instruction, + vixl32::Label* final_label) { + DCHECK(!instruction->IsControlFlow() && !instruction->IsSuspendCheck()); + DCHECK(!instruction->IsInvoke() || !instruction->GetLocations()->CanCall()); + + const HBasicBlock* const block = instruction->GetBlock(); + const HLoopInformation* const info = block->GetLoopInformation(); + HInstruction* const next = instruction->GetNext(); + + // Avoid a branch to a branch. + if (next->IsGoto() && (info == nullptr || + !info->IsBackEdge(*block) || + !info->HasSuspendCheck())) { + final_label = GetLabelOf(next->AsGoto()->GetSuccessor()); + } + + return final_label; +} + +CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats) + : CodeGenerator(graph, + kNumberOfCoreRegisters, + kNumberOfSRegisters, + kNumberOfRegisterPairs, + kCoreCalleeSaves.GetList(), + ComputeSRegisterListMask(kFpuCalleeSaves), + compiler_options, + stats), + block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + location_builder_(graph, this), + instruction_visitor_(graph, this), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator()), + boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + 
type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + call_entrypoint_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + uint32_literals_(std::less(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(StringReferenceValueComparator(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_class_patches_(TypeReferenceValueComparator(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_baker_read_barrier_slow_paths_(std::less(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { + // Always save the LR register to mimic Quick. + AddAllocatedRegister(Location::RegisterLocation(LR)); + // Give D30 and D31 as scratch register to VIXL. The register allocator only works on + // S0-S31, which alias to D0-D15. + GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d31); + GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d30); +} + +void JumpTableARMVIXL::EmitTable(CodeGeneratorARMVIXL* codegen) { + uint32_t num_entries = switch_instr_->GetNumEntries(); + DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold); + + // We are about to use the assembler to place literals directly. Make sure we have enough + // underlying code buffer and we have generated a jump table of the right size, using + // codegen->GetVIXLAssembler()->GetBuffer().Align(); + ExactAssemblyScope aas(codegen->GetVIXLAssembler(), + num_entries * sizeof(int32_t), + CodeBufferCheckScope::kMaximumSize); + // TODO(VIXL): Check that using lower case bind is fine here. 
+ codegen->GetVIXLAssembler()->bind(&table_start_); + for (uint32_t i = 0; i < num_entries; i++) { + codegen->GetVIXLAssembler()->place(bb_addresses_[i].get()); + } +} + +void JumpTableARMVIXL::FixTable(CodeGeneratorARMVIXL* codegen) { + uint32_t num_entries = switch_instr_->GetNumEntries(); + DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold); + + const ArenaVector& successors = switch_instr_->GetBlock()->GetSuccessors(); + for (uint32_t i = 0; i < num_entries; i++) { + vixl32::Label* target_label = codegen->GetLabelOf(successors[i]); + DCHECK(target_label->IsBound()); + int32_t jump_offset = target_label->GetLocation() - table_start_.GetLocation(); + // When doing BX to address we need to have lower bit set to 1 in T32. + if (codegen->GetVIXLAssembler()->IsUsingT32()) { + jump_offset++; + } + DCHECK_GT(jump_offset, std::numeric_limits::min()); + DCHECK_LE(jump_offset, std::numeric_limits::max()); + + bb_addresses_[i].get()->UpdateValue(jump_offset, codegen->GetVIXLAssembler()->GetBuffer()); + } +} + +void CodeGeneratorARMVIXL::FixJumpTables() { + for (auto&& jump_table : jump_tables_) { + jump_table->FixTable(this); + } +} + +#define __ reinterpret_cast(GetAssembler())->GetVIXLAssembler()-> // NOLINT + +void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) { + FixJumpTables(); + + // Emit JIT baker read barrier slow paths. + DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty()); + for (auto& entry : jit_baker_read_barrier_slow_paths_) { + uint32_t encoded_data = entry.first; + vixl::aarch32::Label* slow_path_entry = &entry.second.label; + __ Bind(slow_path_entry); + CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr); + } + + GetAssembler()->FinalizeCode(); + CodeGenerator::Finalize(allocator); + + // Verify Baker read barrier linker patches. 
+ if (kIsDebugBuild) { + ArrayRef code = allocator->GetMemory(); + for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) { + DCHECK(info.label.IsBound()); + uint32_t literal_offset = info.label.GetLocation(); + DCHECK_ALIGNED(literal_offset, 2u); + + auto GetInsn16 = [&code](uint32_t offset) { + DCHECK_ALIGNED(offset, 2u); + return (static_cast(code[offset + 0]) << 0) + + (static_cast(code[offset + 1]) << 8); + }; + auto GetInsn32 = [=](uint32_t offset) { + return (GetInsn16(offset) << 16) + (GetInsn16(offset + 2u) << 0); + }; + + uint32_t encoded_data = info.custom_data; + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + // Check that the next instruction matches the expected LDR. + switch (kind) { + case BakerReadBarrierKind::kField: { + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + if (width == BakerReadBarrierWidth::kWide) { + DCHECK_GE(code.size() - literal_offset, 8u); + uint32_t next_insn = GetInsn32(literal_offset + 4u); + // LDR (immediate), encoding T3, with correct base_reg. + CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xffff0000u, 0xf8d00000u | (base_reg << 16)); + } else { + DCHECK_GE(code.size() - literal_offset, 6u); + uint32_t next_insn = GetInsn16(literal_offset + 4u); + // LDR (immediate), encoding T1, with correct base_reg. + CheckValidReg(next_insn & 0x7u); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xf838u, 0x6800u | (base_reg << 3)); + } + break; + } + case BakerReadBarrierKind::kArray: { + DCHECK_GE(code.size() - literal_offset, 8u); + uint32_t next_insn = GetInsn32(literal_offset + 4u); + // LDR (register) with correct base_reg, S=1 and option=011 (LDR Wt, [Xn, Xm, LSL #2]). 
+ CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. + const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(next_insn & 0xffff0ff0u, 0xf8500020u | (base_reg << 16)); + CheckValidReg(next_insn & 0xf); // Check index register + break; + } + case BakerReadBarrierKind::kGcRoot: { + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + if (width == BakerReadBarrierWidth::kWide) { + DCHECK_GE(literal_offset, 4u); + uint32_t prev_insn = GetInsn32(literal_offset - 4u); + // LDR (immediate), encoding T3, with correct root_reg. + const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(prev_insn & 0xfff0f000u, 0xf8d00000u | (root_reg << 12)); + } else { + DCHECK_GE(literal_offset, 2u); + uint32_t prev_insn = GetInsn16(literal_offset - 2u); + // LDR (immediate), encoding T1, with correct root_reg. + const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(prev_insn & 0xf807u, 0x6800u | root_reg); + } + break; + } + case BakerReadBarrierKind::kUnsafeCas: { + DCHECK_GE(literal_offset, 4u); + uint32_t prev_insn = GetInsn32(literal_offset - 4u); + // ADD (register), encoding T3, with correct root_reg. + const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); + CHECK_EQ(prev_insn & 0xfff0fff0u, 0xeb000000u | (root_reg << 8)); + break; + } + default: + LOG(FATAL) << "Unexpected kind: " << static_cast(kind); + UNREACHABLE(); + } + } + } +} + +void CodeGeneratorARMVIXL::SetupBlockedRegisters() const { + // Stack register, LR and PC are always reserved. + blocked_core_registers_[SP] = true; + blocked_core_registers_[LR] = true; + blocked_core_registers_[PC] = true; + + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // Reserve marking register. + blocked_core_registers_[MR] = true; + } + + // Reserve thread register. + blocked_core_registers_[TR] = true; + + // Reserve temp register. 
+ blocked_core_registers_[IP] = true; + + if (GetGraph()->IsDebuggable()) { + // Stubs do not save callee-save floating point registers. If the graph + // is debuggable, we need to deal with these registers differently. For + // now, just block them. + for (uint32_t i = kFpuCalleeSaves.GetFirstSRegister().GetCode(); + i <= kFpuCalleeSaves.GetLastSRegister().GetCode(); + ++i) { + blocked_fpu_registers_[i] = true; + } + } +} + +InstructionCodeGeneratorARMVIXL::InstructionCodeGeneratorARMVIXL(HGraph* graph, + CodeGeneratorARMVIXL* codegen) + : InstructionCodeGenerator(graph, codegen), + assembler_(codegen->GetAssembler()), + codegen_(codegen) {} + +void CodeGeneratorARMVIXL::ComputeSpillMask() { + core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_; + DCHECK_NE(core_spill_mask_ & (1u << kLrCode), 0u) + << "At least the return address register must be saved"; + // 16-bit PUSH/POP (T1) can save/restore just the LR/PC. + DCHECK(GetVIXLAssembler()->IsUsingT32()); + fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_; + // We use vpush and vpop for saving and restoring floating point registers, which take + // a SRegister and the number of registers to save/restore after that SRegister. We + // therefore update the `fpu_spill_mask_` to also contain those registers not allocated, + // but in the range. 
+ if (fpu_spill_mask_ != 0) { + uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_); + uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_); + for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) { + fpu_spill_mask_ |= (1 << i); + } + } +} + +void CodeGeneratorARMVIXL::MaybeIncrementHotness(bool is_frame_entry) { + if (GetCompilerOptions().CountHotnessInCompiledCode()) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + static_assert(ArtMethod::MaxCounter() == 0xFFFF, "asm is probably wrong"); + if (!is_frame_entry) { + __ Push(vixl32::Register(kMethodRegister)); + GetAssembler()->LoadFromOffset(kLoadWord, kMethodRegister, sp, kArmWordSize); + } + // Load with zero extend to clear the high bits for integer overflow check. + __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value())); + __ Add(temp, temp, 1); + // Subtract one if the counter would overflow. + __ Sub(temp, temp, Operand(temp, ShiftType::LSR, 16)); + __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value())); + if (!is_frame_entry) { + __ Pop(vixl32::Register(kMethodRegister)); + } + } + + if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) { + ScopedObjectAccess soa(Thread::Current()); + ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize); + if (info != nullptr) { + uint32_t address = reinterpret_cast32(info); + vixl::aarch32::Label done; + UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(ip); + if (!is_frame_entry) { + __ Push(r4); // Will be used as temporary. For frame entry, r4 is always available. 
+ } + __ Mov(r4, address); + __ Ldrh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value())); + __ Add(ip, ip, 1); + __ Strh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value())); + if (!is_frame_entry) { + __ Pop(r4); + } + __ Lsls(ip, ip, 16); + __ B(ne, &done); + uint32_t entry_point_offset = + GetThreadOffset(kQuickCompileOptimized).Int32Value(); + if (HasEmptyFrame()) { + CHECK(is_frame_entry); + // For leaf methods, we need to spill lr and r0. Also spill r1 and r2 for + // alignment. + uint32_t core_spill_mask = + (1 << lr.GetCode()) | (1 << r0.GetCode()) | (1 << r1.GetCode()) | (1 << r2.GetCode()); + __ Push(RegisterList(core_spill_mask)); + __ Ldr(lr, MemOperand(tr, entry_point_offset)); + __ Blx(lr); + __ Pop(RegisterList(core_spill_mask)); + } else { + if (!RequiresCurrentMethod()) { + CHECK(is_frame_entry); + GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0); + } + __ Ldr(lr, MemOperand(tr, entry_point_offset)); + __ Blx(lr); + } + __ Bind(&done); + } + } +} + +void CodeGeneratorARMVIXL::GenerateFrameEntry() { + bool skip_overflow_check = + IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm); + DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); + __ Bind(&frame_entry_label_); + + if (HasEmptyFrame()) { + // Ensure that the CFI opcode list is not empty. + GetAssembler()->cfi().Nop(); + MaybeIncrementHotness(/* is_frame_entry= */ true); + return; + } + + if (!skip_overflow_check) { + // Using r4 instead of IP saves 2 bytes. + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp; + // TODO: Remove this check when R4 is made a callee-save register + // in ART compiled code (b/72801708). Currently we need to make + // sure r4 is not blocked, e.g. in special purpose + // TestCodeGeneratorARMVIXL; also asserting that r4 is available + // here. 
+ if (!blocked_core_registers_[R4]) { + for (vixl32::Register reg : kParameterCoreRegistersVIXL) { + DCHECK(!reg.Is(r4)); + } + DCHECK(!kCoreCalleeSaves.Includes(r4)); + temp = r4; + } else { + temp = temps.Acquire(); + } + __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm))); + // The load must immediately precede RecordPcInfo. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ ldr(temp, MemOperand(temp)); + RecordPcInfo(nullptr, 0); + } + + uint32_t frame_size = GetFrameSize(); + uint32_t core_spills_offset = frame_size - GetCoreSpillSize(); + uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize(); + if ((fpu_spill_mask_ == 0u || IsPowerOfTwo(fpu_spill_mask_)) && + core_spills_offset <= 3u * kArmWordSize) { + // Do a single PUSH for core registers including the method and up to two + // filler registers. Then store the single FP spill if any. + // (The worst case is when the method is not required and we actually + // store 3 extra registers but they are stored in the same properly + // aligned 16-byte chunk where we're already writing anyway.) 
+ DCHECK_EQ(kMethodRegister.GetCode(), 0u); + uint32_t extra_regs = MaxInt(core_spills_offset / kArmWordSize); + DCHECK_LT(MostSignificantBit(extra_regs), LeastSignificantBit(core_spill_mask_)); + __ Push(RegisterList(core_spill_mask_ | extra_regs)); + GetAssembler()->cfi().AdjustCFAOffset(frame_size); + GetAssembler()->cfi().RelOffsetForMany(DWARFReg(kMethodRegister), + core_spills_offset, + core_spill_mask_, + kArmWordSize); + if (fpu_spill_mask_ != 0u) { + DCHECK(IsPowerOfTwo(fpu_spill_mask_)); + vixl::aarch32::SRegister sreg(LeastSignificantBit(fpu_spill_mask_)); + GetAssembler()->StoreSToOffset(sreg, sp, fp_spills_offset); + GetAssembler()->cfi().RelOffset(DWARFReg(sreg), /*offset=*/ fp_spills_offset); + } + } else { + __ Push(RegisterList(core_spill_mask_)); + GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_)); + GetAssembler()->cfi().RelOffsetForMany(DWARFReg(kMethodRegister), + /*offset=*/ 0, + core_spill_mask_, + kArmWordSize); + if (fpu_spill_mask_ != 0) { + uint32_t first = LeastSignificantBit(fpu_spill_mask_); + + // Check that list is contiguous. + DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_))); + + __ Vpush(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_))); + GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_)); + GetAssembler()->cfi().RelOffsetForMany(DWARFReg(s0), + /*offset=*/ 0, + fpu_spill_mask_, + kArmWordSize); + } + + // Adjust SP and save the current method if we need it. Note that we do + // not save the method in HCurrentMethod, as the instruction might have + // been removed in the SSA graph. 
+ if (RequiresCurrentMethod() && fp_spills_offset <= 3 * kArmWordSize) { + DCHECK_EQ(kMethodRegister.GetCode(), 0u); + __ Push(RegisterList(MaxInt(fp_spills_offset / kArmWordSize))); + GetAssembler()->cfi().AdjustCFAOffset(fp_spills_offset); + } else { + __ Sub(sp, sp, dchecked_integral_cast(fp_spills_offset)); + GetAssembler()->cfi().AdjustCFAOffset(fp_spills_offset); + if (RequiresCurrentMethod()) { + GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0); + } + } + } + + if (GetGraph()->HasShouldDeoptimizeFlag()) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + // Initialize should_deoptimize flag to 0. + __ Mov(temp, 0); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag()); + } + + MaybeIncrementHotness(/* is_frame_entry= */ true); + MaybeGenerateMarkingRegisterCheck(/* code= */ 1); +} + +void CodeGeneratorARMVIXL::GenerateFrameExit() { + if (HasEmptyFrame()) { + __ Bx(lr); + return; + } + + // Pop LR into PC to return. + DCHECK_NE(core_spill_mask_ & (1 << kLrCode), 0U); + uint32_t pop_mask = (core_spill_mask_ & (~(1 << kLrCode))) | 1 << kPcCode; + + uint32_t frame_size = GetFrameSize(); + uint32_t core_spills_offset = frame_size - GetCoreSpillSize(); + uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize(); + if ((fpu_spill_mask_ == 0u || IsPowerOfTwo(fpu_spill_mask_)) && + // r4 is blocked by TestCodeGeneratorARMVIXL used by some tests. + core_spills_offset <= (blocked_core_registers_[r4.GetCode()] ? 2u : 3u) * kArmWordSize) { + // Load the FP spill if any and then do a single POP including the method + // and up to two filler registers. If we have no FP spills, this also has + // the advantage that we do not need to emit CFI directives. 
+ if (fpu_spill_mask_ != 0u) { + DCHECK(IsPowerOfTwo(fpu_spill_mask_)); + vixl::aarch32::SRegister sreg(LeastSignificantBit(fpu_spill_mask_)); + GetAssembler()->cfi().RememberState(); + GetAssembler()->LoadSFromOffset(sreg, sp, fp_spills_offset); + GetAssembler()->cfi().Restore(DWARFReg(sreg)); + } + // Clobber registers r2-r4 as they are caller-save in ART managed ABI and + // never hold the return value. + uint32_t extra_regs = MaxInt(core_spills_offset / kArmWordSize) << r2.GetCode(); + DCHECK_EQ(extra_regs & kCoreCalleeSaves.GetList(), 0u); + DCHECK_LT(MostSignificantBit(extra_regs), LeastSignificantBit(pop_mask)); + __ Pop(RegisterList(pop_mask | extra_regs)); + if (fpu_spill_mask_ != 0u) { + GetAssembler()->cfi().RestoreState(); + } + } else { + GetAssembler()->cfi().RememberState(); + __ Add(sp, sp, fp_spills_offset); + GetAssembler()->cfi().AdjustCFAOffset(-dchecked_integral_cast(fp_spills_offset)); + if (fpu_spill_mask_ != 0) { + uint32_t first = LeastSignificantBit(fpu_spill_mask_); + + // Check that list is contiguous. 
+ DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_))); + + __ Vpop(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_))); + GetAssembler()->cfi().AdjustCFAOffset( + -static_cast(kArmWordSize) * POPCOUNT(fpu_spill_mask_)); + GetAssembler()->cfi().RestoreMany(DWARFReg(vixl32::SRegister(0)), fpu_spill_mask_); + } + __ Pop(RegisterList(pop_mask)); + GetAssembler()->cfi().RestoreState(); + GetAssembler()->cfi().DefCFAOffset(GetFrameSize()); + } +} + +void CodeGeneratorARMVIXL::Bind(HBasicBlock* block) { + __ Bind(GetLabelOf(block)); +} + +Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Type type) { + switch (type) { + case DataType::Type::kReference: + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { + uint32_t index = gp_index_++; + uint32_t stack_index = stack_index_++; + if (index < calling_convention.GetNumberOfRegisters()) { + return LocationFrom(calling_convention.GetRegisterAt(index)); + } else { + return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index)); + } + } + + case DataType::Type::kInt64: { + uint32_t index = gp_index_; + uint32_t stack_index = stack_index_; + gp_index_ += 2; + stack_index_ += 2; + if (index + 1 < calling_convention.GetNumberOfRegisters()) { + if (calling_convention.GetRegisterAt(index).Is(r1)) { + // Skip R1, and use R2_R3 instead. 
+ gp_index_++; + index++; + } + } + if (index + 1 < calling_convention.GetNumberOfRegisters()) { + DCHECK_EQ(calling_convention.GetRegisterAt(index).GetCode() + 1, + calling_convention.GetRegisterAt(index + 1).GetCode()); + + return LocationFrom(calling_convention.GetRegisterAt(index), + calling_convention.GetRegisterAt(index + 1)); + } else { + return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index)); + } + } + + case DataType::Type::kFloat32: { + uint32_t stack_index = stack_index_++; + if (float_index_ % 2 == 0) { + float_index_ = std::max(double_index_, float_index_); + } + if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) { + return LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++)); + } else { + return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index)); + } + } + + case DataType::Type::kFloat64: { + double_index_ = std::max(double_index_, RoundUp(float_index_, 2)); + uint32_t stack_index = stack_index_; + stack_index_ += 2; + if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) { + uint32_t index = double_index_; + double_index_ += 2; + Location result = LocationFrom( + calling_convention.GetFpuRegisterAt(index), + calling_convention.GetFpuRegisterAt(index + 1)); + DCHECK(ExpectedPairLayout(result)); + return result; + } else { + return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index)); + } + } + + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unexpected parameter type " << type; + UNREACHABLE(); + } + return Location::NoLocation(); +} + +Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::Type type) const { + switch (type) { + case DataType::Type::kReference: + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kUint32: + case 
DataType::Type::kInt32: { + return LocationFrom(r0); + } + + case DataType::Type::kFloat32: { + return LocationFrom(s0); + } + + case DataType::Type::kUint64: + case DataType::Type::kInt64: { + return LocationFrom(r0, r1); + } + + case DataType::Type::kFloat64: { + return LocationFrom(s0, s1); + } + + case DataType::Type::kVoid: + return Location::NoLocation(); + } + + UNREACHABLE(); +} + +Location InvokeDexCallingConventionVisitorARMVIXL::GetMethodLocation() const { + return LocationFrom(kMethodRegister); +} + +void CodeGeneratorARMVIXL::Move32(Location destination, Location source) { + if (source.Equals(destination)) { + return; + } + if (destination.IsRegister()) { + if (source.IsRegister()) { + __ Mov(RegisterFrom(destination), RegisterFrom(source)); + } else if (source.IsFpuRegister()) { + __ Vmov(RegisterFrom(destination), SRegisterFrom(source)); + } else { + GetAssembler()->LoadFromOffset(kLoadWord, + RegisterFrom(destination), + sp, + source.GetStackIndex()); + } + } else if (destination.IsFpuRegister()) { + if (source.IsRegister()) { + __ Vmov(SRegisterFrom(destination), RegisterFrom(source)); + } else if (source.IsFpuRegister()) { + __ Vmov(SRegisterFrom(destination), SRegisterFrom(source)); + } else { + GetAssembler()->LoadSFromOffset(SRegisterFrom(destination), sp, source.GetStackIndex()); + } + } else { + DCHECK(destination.IsStackSlot()) << destination; + if (source.IsRegister()) { + GetAssembler()->StoreToOffset(kStoreWord, + RegisterFrom(source), + sp, + destination.GetStackIndex()); + } else if (source.IsFpuRegister()) { + GetAssembler()->StoreSToOffset(SRegisterFrom(source), sp, destination.GetStackIndex()); + } else { + DCHECK(source.IsStackSlot()) << source; + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + GetAssembler()->LoadFromOffset(kLoadWord, temp, sp, source.GetStackIndex()); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex()); + } + } +} + +void 
CodeGeneratorARMVIXL::MoveConstant(Location location, int32_t value) { + DCHECK(location.IsRegister()); + __ Mov(RegisterFrom(location), value); +} + +void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, DataType::Type dst_type) { + // TODO(VIXL): Maybe refactor to have the 'move' implementation here and use it in + // `ParallelMoveResolverARMVIXL::EmitMove`, as is done in the `arm64` backend. + HParallelMove move(GetGraph()->GetAllocator()); + move.AddMove(src, dst, dst_type, nullptr); + GetMoveResolver()->EmitNativeCode(&move); +} + +void CodeGeneratorARMVIXL::AddLocationAsTemp(Location location, LocationSummary* locations) { + if (location.IsRegister()) { + locations->AddTemp(location); + } else if (location.IsRegisterPair()) { + locations->AddTemp(LocationFrom(LowRegisterFrom(location))); + locations->AddTemp(LocationFrom(HighRegisterFrom(location))); + } else { + UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location; + } +} + +void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path) { + ValidateInvokeRuntime(entrypoint, instruction, slow_path); + + ThreadOffset32 entrypoint_offset = GetThreadOffset(entrypoint); + // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the + // entire oat file. This adds an extra branch and we do not want to slow down the main path. + // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative. + if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) { + __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value())); + // Ensure the pc position is recorded immediately after the `blx` instruction. + // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used. 
+ ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + __ blx(lr); + if (EntrypointRequiresStackMap(entrypoint)) { + RecordPcInfo(instruction, dex_pc, slow_path); + } + } else { + // Ensure the pc position is recorded immediately after the `bl` instruction. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::k32BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + EmitEntrypointThunkCall(entrypoint_offset); + if (EntrypointRequiresStackMap(entrypoint)) { + RecordPcInfo(instruction, dex_pc, slow_path); + } + } +} + +void CodeGeneratorARMVIXL::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset, + HInstruction* instruction, + SlowPathCode* slow_path) { + ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path); + __ Ldr(lr, MemOperand(tr, entry_point_offset)); + __ Blx(lr); +} + +void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* successor) { + if (successor->IsExitBlock()) { + DCHECK(got->GetPrevious()->AlwaysThrows()); + return; // no code needed + } + + HBasicBlock* block = got->GetBlock(); + HInstruction* previous = got->GetPrevious(); + HLoopInformation* info = block->GetLoopInformation(); + + if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) { + codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false); + GenerateSuspendCheck(info->GetSuspendCheck(), successor); + return; + } + if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) { + GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 2); + } + if (!codegen_->GoesToNextBlock(block, successor)) { + __ B(codegen_->GetLabelOf(successor)); + } +} + +void LocationsBuilderARMVIXL::VisitGoto(HGoto* got) { + got->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARMVIXL::VisitGoto(HGoto* got) { + HandleGoto(got, got->GetSuccessor()); +} + +void 
LocationsBuilderARMVIXL::VisitTryBoundary(HTryBoundary* try_boundary) { + try_boundary->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARMVIXL::VisitTryBoundary(HTryBoundary* try_boundary) { + HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor(); + if (!successor->IsExitBlock()) { + HandleGoto(try_boundary, successor); + } +} + +void LocationsBuilderARMVIXL::VisitExit(HExit* exit) { + exit->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARMVIXL::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { +} + +void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* condition, + vixl32::Label* true_target, + vixl32::Label* false_target, + bool is_far_target) { + if (true_target == false_target) { + DCHECK(true_target != nullptr); + __ B(true_target); + return; + } + + vixl32::Label* non_fallthrough_target; + bool invert; + bool emit_both_branches; + + if (true_target == nullptr) { + // The true target is fallthrough. + DCHECK(false_target != nullptr); + non_fallthrough_target = false_target; + invert = true; + emit_both_branches = false; + } else { + non_fallthrough_target = true_target; + invert = false; + // Either the false target is fallthrough, or there is no fallthrough + // and both branches must be emitted. + emit_both_branches = (false_target != nullptr); + } + + const auto cond = GenerateTest(condition, invert, codegen_); + + __ B(cond.first, non_fallthrough_target, is_far_target); + + if (emit_both_branches) { + // No target falls through, we need to branch. + __ B(false_target); + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instruction, + size_t condition_input_index, + vixl32::Label* true_target, + vixl32::Label* false_target, + bool far_target) { + HInstruction* cond = instruction->InputAt(condition_input_index); + + if (true_target == nullptr && false_target == nullptr) { + // Nothing to do. The code always falls through. 
+ return; + } else if (cond->IsIntConstant()) { + // Constant condition, statically compared against "true" (integer value 1). + if (cond->AsIntConstant()->IsTrue()) { + if (true_target != nullptr) { + __ B(true_target); + } + } else { + DCHECK(cond->AsIntConstant()->IsFalse()) << Int32ConstantFrom(cond); + if (false_target != nullptr) { + __ B(false_target); + } + } + return; + } + + // The following code generates these patterns: + // (1) true_target == nullptr && false_target != nullptr + // - opposite condition true => branch to false_target + // (2) true_target != nullptr && false_target == nullptr + // - condition true => branch to true_target + // (3) true_target != nullptr && false_target != nullptr + // - condition true => branch to true_target + // - branch to false_target + if (IsBooleanValueOrMaterializedCondition(cond)) { + // Condition has been materialized, compare the output to 0. + if (kIsDebugBuild) { + Location cond_val = instruction->GetLocations()->InAt(condition_input_index); + DCHECK(cond_val.IsRegister()); + } + if (true_target == nullptr) { + __ CompareAndBranchIfZero(InputRegisterAt(instruction, condition_input_index), + false_target, + far_target); + } else { + __ CompareAndBranchIfNonZero(InputRegisterAt(instruction, condition_input_index), + true_target, + far_target); + } + } else { + // Condition has not been materialized. Use its inputs as the comparison and + // its condition as the branch condition. + HCondition* condition = cond->AsCondition(); + + // If this is a long or FP comparison that has been folded into + // the HCondition, generate the comparison directly. 
+ DataType::Type type = condition->InputAt(0)->GetType(); + if (type == DataType::Type::kInt64 || DataType::IsFloatingPointType(type)) { + GenerateCompareTestAndBranch(condition, true_target, false_target, far_target); + return; + } + + vixl32::Label* non_fallthrough_target; + vixl32::Condition arm_cond = vixl32::Condition::None(); + const vixl32::Register left = InputRegisterAt(cond, 0); + const Operand right = InputOperandAt(cond, 1); + + if (true_target == nullptr) { + arm_cond = ARMCondition(condition->GetOppositeCondition()); + non_fallthrough_target = false_target; + } else { + arm_cond = ARMCondition(condition->GetCondition()); + non_fallthrough_target = true_target; + } + + if (right.IsImmediate() && right.GetImmediate() == 0 && (arm_cond.Is(ne) || arm_cond.Is(eq))) { + if (arm_cond.Is(eq)) { + __ CompareAndBranchIfZero(left, non_fallthrough_target, far_target); + } else { + DCHECK(arm_cond.Is(ne)); + __ CompareAndBranchIfNonZero(left, non_fallthrough_target, far_target); + } + } else { + __ Cmp(left, right); + __ B(arm_cond, non_fallthrough_target, far_target); + } + } + + // If neither branch falls through (case 3), the conditional branch to `true_target` + // was already emitted (case 2) and we need to emit a jump to `false_target`. + if (true_target != nullptr && false_target != nullptr) { + __ B(false_target); + } +} + +void LocationsBuilderARMVIXL::VisitIf(HIf* if_instr) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr); + if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) { + locations->SetInAt(0, Location::RequiresRegister()); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) { + HBasicBlock* true_successor = if_instr->IfTrueSuccessor(); + HBasicBlock* false_successor = if_instr->IfFalseSuccessor(); + vixl32::Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ? 
+ nullptr : codegen_->GetLabelOf(true_successor); + vixl32::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ? + nullptr : codegen_->GetLabelOf(false_successor); + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); +} + +void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) + LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + RegisterSet caller_saves = RegisterSet::Empty(); + caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0))); + locations->SetCustomSlowPathCallerSaves(caller_saves); + if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { + locations->SetInAt(0, Location::RequiresRegister()); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) { + SlowPathCodeARMVIXL* slow_path = + deopt_slow_paths_.NewSlowPath(deoptimize); + GenerateTestAndBranch(deoptimize, + /* condition_input_index= */ 0, + slow_path->GetEntryLabel(), + /* false_target= */ nullptr); +} + +void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) + LocationSummary(flag, LocationSummary::kNoCall); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { + GetAssembler()->LoadFromOffset(kLoadWord, + OutputRegister(flag), + sp, + codegen_->GetStackOffsetOfShouldDeoptimizeFlag()); +} + +void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select); + const bool is_floating_point = DataType::IsFloatingPointType(select->GetType()); + + if (is_floating_point) { + locations->SetInAt(0, Location::RequiresFpuRegister()); + 
locations->SetInAt(1, Location::FpuRegisterOrConstant(select->GetTrueValue())); + } else { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Arm8BitEncodableConstantOrRegister(select->GetTrueValue())); + } + + if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) { + locations->SetInAt(2, Location::RegisterOrConstant(select->GetCondition())); + // The code generator handles overlap with the values, but not with the condition. + locations->SetOut(Location::SameAsFirstInput()); + } else if (is_floating_point) { + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + } else { + if (!locations->InAt(1).IsConstant()) { + locations->SetInAt(0, Arm8BitEncodableConstantOrRegister(select->GetFalseValue())); + } + + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) { + HInstruction* const condition = select->GetCondition(); + const LocationSummary* const locations = select->GetLocations(); + const DataType::Type type = select->GetType(); + const Location first = locations->InAt(0); + const Location out = locations->Out(); + const Location second = locations->InAt(1); + + // In the unlucky case the output of this instruction overlaps + // with an input of an "emitted-at-use-site" condition, and + // the output of this instruction is not one of its inputs, we'll + // need to fallback to branches instead of conditional ARM instructions. 
+ bool output_overlaps_with_condition_inputs = + !IsBooleanValueOrMaterializedCondition(condition) && + !out.Equals(first) && + !out.Equals(second) && + (condition->GetLocations()->InAt(0).Equals(out) || + condition->GetLocations()->InAt(1).Equals(out)); + DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition()); + Location src; + + if (condition->IsIntConstant()) { + if (condition->AsIntConstant()->IsFalse()) { + src = first; + } else { + src = second; + } + + codegen_->MoveLocation(out, src, type); + return; + } + + if (!DataType::IsFloatingPointType(type) && !output_overlaps_with_condition_inputs) { + bool invert = false; + + if (out.Equals(second)) { + src = first; + invert = true; + } else if (out.Equals(first)) { + src = second; + } else if (second.IsConstant()) { + DCHECK(CanEncodeConstantAs8BitImmediate(second.GetConstant())); + src = second; + } else if (first.IsConstant()) { + DCHECK(CanEncodeConstantAs8BitImmediate(first.GetConstant())); + src = first; + invert = true; + } else { + src = second; + } + + if (CanGenerateConditionalMove(out, src)) { + if (!out.Equals(first) && !out.Equals(second)) { + codegen_->MoveLocation(out, src.Equals(first) ? second : first, type); + } + + std::pair cond(eq, ne); + + if (IsBooleanValueOrMaterializedCondition(condition)) { + __ Cmp(InputRegisterAt(select, 2), 0); + cond = invert ? std::make_pair(eq, ne) : std::make_pair(ne, eq); + } else { + cond = GenerateTest(condition->AsCondition(), invert, codegen_); + } + + const size_t instr_count = out.IsRegisterPair() ? 4 : 2; + // We use the scope because of the IT block that follows. 
+ ExactAssemblyScope guard(GetVIXLAssembler(), + instr_count * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + if (out.IsRegister()) { + __ it(cond.first); + __ mov(cond.first, RegisterFrom(out), OperandFrom(src, type)); + } else { + DCHECK(out.IsRegisterPair()); + + Operand operand_high(0); + Operand operand_low(0); + + if (src.IsConstant()) { + const int64_t value = Int64ConstantFrom(src); + + operand_high = High32Bits(value); + operand_low = Low32Bits(value); + } else { + DCHECK(src.IsRegisterPair()); + operand_high = HighRegisterFrom(src); + operand_low = LowRegisterFrom(src); + } + + __ it(cond.first); + __ mov(cond.first, LowRegisterFrom(out), operand_low); + __ it(cond.first); + __ mov(cond.first, HighRegisterFrom(out), operand_high); + } + + return; + } + } + + vixl32::Label* false_target = nullptr; + vixl32::Label* true_target = nullptr; + vixl32::Label select_end; + vixl32::Label other_case; + vixl32::Label* const target = codegen_->GetFinalLabel(select, &select_end); + + if (out.Equals(second)) { + true_target = target; + src = first; + } else { + false_target = target; + src = second; + + if (!out.Equals(first)) { + if (output_overlaps_with_condition_inputs) { + false_target = &other_case; + } else { + codegen_->MoveLocation(out, first, type); + } + } + } + + GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target= */ false); + codegen_->MoveLocation(out, src, type); + if (output_overlaps_with_condition_inputs) { + __ B(target); + __ Bind(&other_case); + codegen_->MoveLocation(out, first, type); + } + + if (select_end.IsReferenced()) { + __ Bind(&select_end); + } +} + +void LocationsBuilderARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo* info) { + new (GetGraph()->GetAllocator()) LocationSummary(info); +} + +void InstructionCodeGeneratorARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo*) { + // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile. 
+} + +void CodeGeneratorARMVIXL::GenerateNop() { + __ Nop(); +} + +// `temp` is an extra temporary register that is used for some conditions; +// callers may not specify it, in which case the method will use a scratch +// register instead. +void CodeGeneratorARMVIXL::GenerateConditionWithZero(IfCondition condition, + vixl32::Register out, + vixl32::Register in, + vixl32::Register temp) { + switch (condition) { + case kCondEQ: + // x <= 0 iff x == 0 when the comparison is unsigned. + case kCondBE: + if (!temp.IsValid() || (out.IsLow() && !out.Is(in))) { + temp = out; + } + + // Avoid 32-bit instructions if possible; note that `in` and `temp` must be + // different as well. + if (in.IsLow() && temp.IsLow() && !in.Is(temp)) { + // temp = - in; only 0 sets the carry flag. + __ Rsbs(temp, in, 0); + + if (out.Is(in)) { + std::swap(in, temp); + } + + // out = - in + in + carry = carry + __ Adc(out, temp, in); + } else { + // If `in` is 0, then it has 32 leading zeros, and less than that otherwise. + __ Clz(out, in); + // Any number less than 32 logically shifted right by 5 bits results in 0; + // the same operation on 32 yields 1. + __ Lsr(out, out, 5); + } + + break; + case kCondNE: + // x > 0 iff x != 0 when the comparison is unsigned. + case kCondA: { + UseScratchRegisterScope temps(GetVIXLAssembler()); + + if (out.Is(in)) { + if (!temp.IsValid() || in.Is(temp)) { + temp = temps.Acquire(); + } + } else if (!temp.IsValid() || !temp.IsLow()) { + temp = out; + } + + // temp = in - 1; only 0 does not set the carry flag. + __ Subs(temp, in, 1); + // out = in + ~temp + carry = in + (-(in - 1) - 1) + carry = in - in + 1 - 1 + carry = carry + __ Sbc(out, in, temp); + break; + } + case kCondGE: + __ Mvn(out, in); + in = out; + FALLTHROUGH_INTENDED; + case kCondLT: + // We only care about the sign bit. + __ Lsr(out, in, 31); + break; + case kCondAE: + // Trivially true. + __ Mov(out, 1); + break; + case kCondB: + // Trivially false. 
+ __ Mov(out, 0); + break; + default: + LOG(FATAL) << "Unexpected condition " << condition; + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall); + const DataType::Type type = cond->InputAt(0)->GetType(); + if (DataType::IsFloatingPointType(type)) { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1))); + } else { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1))); + } + if (!cond->IsEmittedAtUseSite()) { + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } +} + +void InstructionCodeGeneratorARMVIXL::HandleCondition(HCondition* cond) { + if (cond->IsEmittedAtUseSite()) { + return; + } + + const DataType::Type type = cond->GetLeft()->GetType(); + + if (DataType::IsFloatingPointType(type)) { + GenerateConditionGeneric(cond, codegen_); + return; + } + + DCHECK(DataType::IsIntegralType(type) || type == DataType::Type::kReference) << type; + + const IfCondition condition = cond->GetCondition(); + + // A condition with only one boolean input, or two boolean inputs without being equality or + // inequality results from transformations done by the instruction simplifier, and is handled + // as a regular condition with integral inputs. + if (type == DataType::Type::kBool && + cond->GetRight()->GetType() == DataType::Type::kBool && + (condition == kCondEQ || condition == kCondNE)) { + vixl32::Register left = InputRegisterAt(cond, 0); + const vixl32::Register out = OutputRegister(cond); + const Location right_loc = cond->GetLocations()->InAt(1); + + // The constant case is handled by the instruction simplifier. + DCHECK(!right_loc.IsConstant()); + + vixl32::Register right = RegisterFrom(right_loc); + + // Avoid 32-bit instructions if possible. 
+ if (out.Is(right)) { + std::swap(left, right); + } + + __ Eor(out, left, right); + + if (condition == kCondEQ) { + __ Eor(out, out, 1); + } + + return; + } + + GenerateConditionIntegralOrNonPrimitive(cond, codegen_); +} + +void LocationsBuilderARMVIXL::VisitEqual(HEqual* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitEqual(HEqual* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitNotEqual(HNotEqual* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitNotEqual(HNotEqual* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitLessThan(HLessThan* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitLessThan(HLessThan* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitLessThanOrEqual(HLessThanOrEqual* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitLessThanOrEqual(HLessThanOrEqual* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitGreaterThan(HGreaterThan* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitGreaterThan(HGreaterThan* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitBelow(HBelow* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitBelow(HBelow* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitBelowOrEqual(HBelowOrEqual* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitBelowOrEqual(HBelowOrEqual* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitAbove(HAbove* comp) { + HandleCondition(comp); +} + +void 
InstructionCodeGeneratorARMVIXL::VisitAbove(HAbove* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) { + HandleCondition(comp); +} + +void InstructionCodeGeneratorARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) { + HandleCondition(comp); +} + +void LocationsBuilderARMVIXL::VisitIntConstant(HIntConstant* constant) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(constant)); +} + +void InstructionCodeGeneratorARMVIXL::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) { + // Will be generated at use site. +} + +void LocationsBuilderARMVIXL::VisitNullConstant(HNullConstant* constant) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(constant)); +} + +void InstructionCodeGeneratorARMVIXL::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) { + // Will be generated at use site. +} + +void LocationsBuilderARMVIXL::VisitLongConstant(HLongConstant* constant) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(constant)); +} + +void InstructionCodeGeneratorARMVIXL::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) { + // Will be generated at use site. +} + +void LocationsBuilderARMVIXL::VisitFloatConstant(HFloatConstant* constant) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(constant)); +} + +void InstructionCodeGeneratorARMVIXL::VisitFloatConstant( + HFloatConstant* constant ATTRIBUTE_UNUSED) { + // Will be generated at use site. 
+} + +void LocationsBuilderARMVIXL::VisitDoubleConstant(HDoubleConstant* constant) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(constant)); +} + +void InstructionCodeGeneratorARMVIXL::VisitDoubleConstant( + HDoubleConstant* constant ATTRIBUTE_UNUSED) { + // Will be generated at use site. +} + +void LocationsBuilderARMVIXL::VisitConstructorFence(HConstructorFence* constructor_fence) { + constructor_fence->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARMVIXL::VisitConstructorFence( + HConstructorFence* constructor_fence ATTRIBUTE_UNUSED) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); +} + +void LocationsBuilderARMVIXL::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { + memory_barrier->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARMVIXL::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { + codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind()); +} + +void LocationsBuilderARMVIXL::VisitReturnVoid(HReturnVoid* ret) { + ret->SetLocations(nullptr); +} + +void InstructionCodeGeneratorARMVIXL::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) { + codegen_->GenerateFrameExit(); +} + +void LocationsBuilderARMVIXL::VisitReturn(HReturn* ret) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall); + locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType())); +} + +void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret) { + if (GetGraph()->IsCompilingOsr()) { + // To simplify callers of an OSR method, we put the return value in both + // floating point and core registers. 
+ switch (ret->InputAt(0)->GetType()) { + case DataType::Type::kFloat32: + __ Vmov(r0, s0); + break; + case DataType::Type::kFloat64: + __ Vmov(r0, r1, d0); + break; + default: + break; + } + } + codegen_->GenerateFrameExit(); +} + +void LocationsBuilderARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { + // The trampoline uses the same calling convention as dex calling conventions, + // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain + // the method_idx. + HandleInvoke(invoke); +} + +void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { + codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 3); +} + +void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { + // Explicit clinit checks triggered by static invokes must have been pruned by + // art::PrepareForRegisterAllocation. + DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); + + IntrinsicLocationsBuilderARMVIXL intrinsic(codegen_); + if (intrinsic.TryDispatch(invoke)) { + return; + } + + HandleInvoke(invoke); +} + +static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARMVIXL* codegen) { + if (invoke->GetLocations()->Intrinsified()) { + IntrinsicCodeGeneratorARMVIXL intrinsic(codegen); + intrinsic.Dispatch(invoke); + return true; + } + return false; +} + +void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { + // Explicit clinit checks triggered by static invokes must have been pruned by + // art::PrepareForRegisterAllocation. + DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); + + if (TryGenerateIntrinsicCode(invoke, codegen_)) { + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 4); + return; + } + + LocationSummary* locations = invoke->GetLocations(); + codegen_->GenerateStaticOrDirectCall( + invoke, locations->HasTemps() ? 
locations->GetTemp(0) : Location::NoLocation()); + + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 5); +} + +void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) { + InvokeDexCallingConventionVisitorARMVIXL calling_convention_visitor; + CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor); +} + +void LocationsBuilderARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) { + IntrinsicLocationsBuilderARMVIXL intrinsic(codegen_); + if (intrinsic.TryDispatch(invoke)) { + return; + } + + HandleInvoke(invoke); +} + +void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) { + if (TryGenerateIntrinsicCode(invoke, codegen_)) { + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 6); + return; + } + + codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0)); + DCHECK(!codegen_->IsLeafMethod()); + + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 7); +} + +void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) { + HandleInvoke(invoke); + // Add the hidden argument. + invoke->GetLocations()->AddTemp(LocationFrom(r12)); +} + +void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction, + vixl32::Register klass) { + DCHECK_EQ(r0.GetCode(), klass.GetCode()); + // We know the destination of an intrinsic, so no need to record inline + // caches. 
+ if (!instruction->GetLocations()->Intrinsified() && + GetGraph()->IsCompilingBaseline() && + !Runtime::Current()->IsAotCompiler()) { + DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke()); + ScopedObjectAccess soa(Thread::Current()); + ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize); + if (info != nullptr) { + InlineCache* cache = info->GetInlineCache(instruction->GetDexPc()); + uint32_t address = reinterpret_cast32(cache); + vixl32::Label done; + UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(ip); + __ Mov(r4, address); + __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value())); + // Fast path for a monomorphic cache. + __ Cmp(klass, ip); + __ B(eq, &done, /* is_far_target= */ false); + InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); + __ Bind(&done); + } + } +} + +void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) { + // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError. + LocationSummary* locations = invoke->GetLocations(); + vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); + vixl32::Register hidden_reg = RegisterFrom(locations->GetTemp(1)); + Location receiver = locations->InAt(0); + uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + + DCHECK(!receiver.IsStackSlot()); + + // Ensure the pc position is recorded immediately after the `ldr` instruction. + { + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + // /* HeapReference */ temp = receiver->klass_ + __ ldr(temp, MemOperand(RegisterFrom(receiver), class_offset)); + codegen_->MaybeRecordImplicitNullCheck(invoke); + } + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. 
+ // However this is not required in practice, as this is an + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). + GetAssembler()->MaybeUnpoisonHeapReference(temp); + + // If we're compiling baseline, update the inline cache. + codegen_->MaybeGenerateInlineCacheCheck(invoke, temp); + + GetAssembler()->LoadFromOffset(kLoadWord, + temp, + temp, + mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value()); + + uint32_t method_offset = static_cast(ImTable::OffsetOfElement( + invoke->GetImtIndex(), kArmPointerSize)); + // temp = temp->GetImtEntryAt(method_offset); + GetAssembler()->LoadFromOffset(kLoadWord, temp, temp, method_offset); + uint32_t entry_point = + ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value(); + // LR = temp->GetEntryPoint(); + GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point); + + // Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other + // instruction from clobbering it as they might use r12 as a scratch register. + DCHECK(hidden_reg.Is(r12)); + + { + // The VIXL macro assembler may clobber any of the scratch registers that are available to it, + // so it checks if the application is using them (by passing them to the macro assembler + // methods). The following application of UseScratchRegisterScope corrects VIXL's notion of + // what is available, and is the opposite of the standard usage: Instead of requesting a + // temporary location, it imposes an external constraint (i.e. a specific register is reserved + // for the hidden argument). Note that this works even if VIXL needs a scratch register itself + // (to materialize the constant), since the destination register becomes available for such use + // internally for the duration of the macro instruction. 
+ UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(hidden_reg); + __ Mov(hidden_reg, invoke->GetDexMethodIndex()); + } + { + // Ensure the pc position is recorded immediately after the `blx` instruction. + // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + // LR(); + __ blx(lr); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + DCHECK(!codegen_->IsLeafMethod()); + } + + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 8); +} + +void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { + HandleInvoke(invoke); +} + +void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { + codegen_->GenerateInvokePolymorphicCall(invoke); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 9); +} + +void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) { + HandleInvoke(invoke); +} + +void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) { + codegen_->GenerateInvokeCustomCall(invoke); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 10); +} + +void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall); + switch (neg->GetResultType()) { + case DataType::Type::kInt32: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), 
Location::kNoOutputOverlap); + break; + + default: + LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitNeg(HNeg* neg) { + LocationSummary* locations = neg->GetLocations(); + Location out = locations->Out(); + Location in = locations->InAt(0); + switch (neg->GetResultType()) { + case DataType::Type::kInt32: + __ Rsb(OutputRegister(neg), InputRegisterAt(neg, 0), 0); + break; + + case DataType::Type::kInt64: + // out.lo = 0 - in.lo (and update the carry/borrow (C) flag) + __ Rsbs(LowRegisterFrom(out), LowRegisterFrom(in), 0); + // We cannot emit an RSC (Reverse Subtract with Carry) + // instruction here, as it does not exist in the Thumb-2 + // instruction set. We use the following approach + // using SBC and SUB instead. + // + // out.hi = -C + __ Sbc(HighRegisterFrom(out), HighRegisterFrom(out), HighRegisterFrom(out)); + // out.hi = out.hi - in.hi + __ Sub(HighRegisterFrom(out), HighRegisterFrom(out), HighRegisterFrom(in)); + break; + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Vneg(OutputVRegister(neg), InputVRegister(neg)); + break; + + default: + LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); + } +} + +void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); + DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type)) + << input_type << " -> " << result_type; + + // The float-to-long, double-to-long and long-to-float type conversions + // rely on a call to the runtime. + LocationSummary::CallKind call_kind = + (((input_type == DataType::Type::kFloat32 || input_type == DataType::Type::kFloat64) + && result_type == DataType::Type::kInt64) + || (input_type == DataType::Type::kInt64 && result_type == DataType::Type::kFloat32)) + ? 
LocationSummary::kCallOnMainOnly + : LocationSummary::kNoCall; + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind); + + switch (result_type) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK(DataType::IsIntegralType(input_type)) << input_type; + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kInt32: + switch (input_type) { + case DataType::Type::kInt64: + locations->SetInAt(0, Location::Any()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kFloat32: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresFpuRegister()); + break; + + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresFpuRegister()); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kInt64: + switch (input_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + + case DataType::Type::kFloat32: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); + locations->SetOut(LocationFrom(r0, r1)); + break; + } + + case DataType::Type::kFloat64: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + 
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0), + calling_convention.GetFpuRegisterAt(1))); + locations->SetOut(LocationFrom(r0, r1)); + break; + } + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kFloat32: + switch (input_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + + case DataType::Type::kInt64: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0), + calling_convention.GetRegisterAt(1))); + locations->SetOut(LocationFrom(calling_convention.GetFpuRegisterAt(0))); + break; + } + + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kFloat64: + switch (input_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + locations->AddTemp(Location::RequiresFpuRegister()); + locations->AddTemp(Location::RequiresFpuRegister()); + break; + + case DataType::Type::kFloat32: + locations->SetInAt(0, Location::RequiresFpuRegister()); + 
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } +} + +void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conversion) { + LocationSummary* locations = conversion->GetLocations(); + Location out = locations->Out(); + Location in = locations->InAt(0); + DataType::Type result_type = conversion->GetResultType(); + DataType::Type input_type = conversion->GetInputType(); + DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type)) + << input_type << " -> " << result_type; + switch (result_type) { + case DataType::Type::kUint8: + switch (input_type) { + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 8); + break; + case DataType::Type::kInt64: + __ Ubfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 8); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kInt8: + switch (input_type) { + case DataType::Type::kUint8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 8); + break; + case DataType::Type::kInt64: + __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 8); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kUint16: + switch (input_type) { + case DataType::Type::kInt8: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 
0), 0, 16); + break; + case DataType::Type::kInt64: + __ Ubfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kInt16: + switch (input_type) { + case DataType::Type::kUint16: + case DataType::Type::kInt32: + __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16); + break; + case DataType::Type::kInt64: + __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kInt32: + switch (input_type) { + case DataType::Type::kInt64: + DCHECK(out.IsRegister()); + if (in.IsRegisterPair()) { + __ Mov(OutputRegister(conversion), LowRegisterFrom(in)); + } else if (in.IsDoubleStackSlot()) { + GetAssembler()->LoadFromOffset(kLoadWord, + OutputRegister(conversion), + sp, + in.GetStackIndex()); + } else { + DCHECK(in.IsConstant()); + DCHECK(in.GetConstant()->IsLongConstant()); + int64_t value = in.GetConstant()->AsLongConstant()->GetValue(); + __ Mov(OutputRegister(conversion), static_cast(value)); + } + break; + + case DataType::Type::kFloat32: { + vixl32::SRegister temp = LowSRegisterFrom(locations->GetTemp(0)); + __ Vcvt(S32, F32, temp, InputSRegisterAt(conversion, 0)); + __ Vmov(OutputRegister(conversion), temp); + break; + } + + case DataType::Type::kFloat64: { + vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0)); + __ Vcvt(S32, F64, temp_s, DRegisterFrom(in)); + __ Vmov(OutputRegister(conversion), temp_s); + break; + } + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kInt64: + switch (input_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case 
DataType::Type::kInt16: + case DataType::Type::kInt32: + DCHECK(out.IsRegisterPair()); + DCHECK(in.IsRegister()); + __ Mov(LowRegisterFrom(out), InputRegisterAt(conversion, 0)); + // Sign extension. + __ Asr(HighRegisterFrom(out), LowRegisterFrom(out), 31); + break; + + case DataType::Type::kFloat32: + codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc()); + CheckEntrypointTypes(); + break; + + case DataType::Type::kFloat64: + codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc()); + CheckEntrypointTypes(); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kFloat32: + switch (input_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Vmov(OutputSRegister(conversion), InputRegisterAt(conversion, 0)); + __ Vcvt(F32, S32, OutputSRegister(conversion), OutputSRegister(conversion)); + break; + + case DataType::Type::kInt64: + codegen_->InvokeRuntime(kQuickL2f, conversion, conversion->GetDexPc()); + CheckEntrypointTypes(); + break; + + case DataType::Type::kFloat64: + __ Vcvt(F32, F64, OutputSRegister(conversion), DRegisterFrom(in)); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + case DataType::Type::kFloat64: + switch (input_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Vmov(LowSRegisterFrom(out), InputRegisterAt(conversion, 0)); + __ Vcvt(F64, S32, DRegisterFrom(out), LowSRegisterFrom(out)); + break; + + case DataType::Type::kInt64: { + vixl32::Register low = LowRegisterFrom(in); + vixl32::Register high = HighRegisterFrom(in); + vixl32::SRegister out_s = 
LowSRegisterFrom(out); + vixl32::DRegister out_d = DRegisterFrom(out); + vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0)); + vixl32::DRegister temp_d = DRegisterFrom(locations->GetTemp(0)); + vixl32::DRegister constant_d = DRegisterFrom(locations->GetTemp(1)); + + // temp_d = int-to-double(high) + __ Vmov(temp_s, high); + __ Vcvt(F64, S32, temp_d, temp_s); + // constant_d = k2Pow32EncodingForDouble + __ Vmov(constant_d, bit_cast(k2Pow32EncodingForDouble)); + // out_d = unsigned-to-double(low) + __ Vmov(out_s, low); + __ Vcvt(F64, U32, out_d, out_s); + // out_d += temp_d * constant_d + __ Vmla(F64, out_d, temp_d, constant_d); + break; + } + + case DataType::Type::kFloat32: + __ Vcvt(F64, F32, DRegisterFrom(out), InputSRegisterAt(conversion, 0)); + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } + break; + + default: + LOG(FATAL) << "Unexpected type conversion from " << input_type + << " to " << result_type; + } +} + +void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall); + switch (add->GetResultType()) { + case DataType::Type::kInt32: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ArmEncodableConstantOrRegister(add->InputAt(1), ADD)); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + 
break; + } + + default: + LOG(FATAL) << "Unexpected add type " << add->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) { + LocationSummary* locations = add->GetLocations(); + Location out = locations->Out(); + Location first = locations->InAt(0); + Location second = locations->InAt(1); + + switch (add->GetResultType()) { + case DataType::Type::kInt32: { + __ Add(OutputRegister(add), InputRegisterAt(add, 0), InputOperandAt(add, 1)); + } + break; + + case DataType::Type::kInt64: { + if (second.IsConstant()) { + uint64_t value = static_cast(Int64FromConstant(second.GetConstant())); + GenerateAddLongConst(out, first, value); + } else { + DCHECK(second.IsRegisterPair()); + __ Adds(LowRegisterFrom(out), LowRegisterFrom(first), LowRegisterFrom(second)); + __ Adc(HighRegisterFrom(out), HighRegisterFrom(first), HighRegisterFrom(second)); + } + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Vadd(OutputVRegister(add), InputVRegisterAt(add, 0), InputVRegisterAt(add, 1)); + break; + + default: + LOG(FATAL) << "Unexpected add type " << add->GetResultType(); + } +} + +void LocationsBuilderARMVIXL::VisitSub(HSub* sub) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall); + switch (sub->GetResultType()) { + case DataType::Type::kInt32: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ArmEncodableConstantOrRegister(sub->InputAt(1), SUB)); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + locations->SetInAt(0, Location::RequiresFpuRegister()); + 
locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + } + default: + LOG(FATAL) << "Unexpected sub type " << sub->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitSub(HSub* sub) { + LocationSummary* locations = sub->GetLocations(); + Location out = locations->Out(); + Location first = locations->InAt(0); + Location second = locations->InAt(1); + switch (sub->GetResultType()) { + case DataType::Type::kInt32: { + __ Sub(OutputRegister(sub), InputRegisterAt(sub, 0), InputOperandAt(sub, 1)); + break; + } + + case DataType::Type::kInt64: { + if (second.IsConstant()) { + uint64_t value = static_cast(Int64FromConstant(second.GetConstant())); + GenerateAddLongConst(out, first, -value); + } else { + DCHECK(second.IsRegisterPair()); + __ Subs(LowRegisterFrom(out), LowRegisterFrom(first), LowRegisterFrom(second)); + __ Sbc(HighRegisterFrom(out), HighRegisterFrom(first), HighRegisterFrom(second)); + } + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Vsub(OutputVRegister(sub), InputVRegisterAt(sub, 0), InputVRegisterAt(sub, 1)); + break; + + default: + LOG(FATAL) << "Unexpected sub type " << sub->GetResultType(); + } +} + +void LocationsBuilderARMVIXL::VisitMul(HMul* mul) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall); + switch (mul->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), 
Location::kNoOutputOverlap); + break; + } + + default: + LOG(FATAL) << "Unexpected mul type " << mul->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) { + LocationSummary* locations = mul->GetLocations(); + Location out = locations->Out(); + Location first = locations->InAt(0); + Location second = locations->InAt(1); + switch (mul->GetResultType()) { + case DataType::Type::kInt32: { + __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1)); + break; + } + case DataType::Type::kInt64: { + vixl32::Register out_hi = HighRegisterFrom(out); + vixl32::Register out_lo = LowRegisterFrom(out); + vixl32::Register in1_hi = HighRegisterFrom(first); + vixl32::Register in1_lo = LowRegisterFrom(first); + vixl32::Register in2_hi = HighRegisterFrom(second); + vixl32::Register in2_lo = LowRegisterFrom(second); + + // Extra checks to protect caused by the existence of R1_R2. + // The algorithm is wrong if out.hi is either in1.lo or in2.lo: + // (e.g. 
in1=r0_r1, in2=r2_r3 and out=r1_r2); + DCHECK(!out_hi.Is(in1_lo)); + DCHECK(!out_hi.Is(in2_lo)); + + // input: in1 - 64 bits, in2 - 64 bits + // output: out + // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo + // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32] + // parts: out.lo = (in1.lo * in2.lo)[31:0] + + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + // temp <- in1.lo * in2.hi + __ Mul(temp, in1_lo, in2_hi); + // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo + __ Mla(out_hi, in1_hi, in2_lo, temp); + // out.lo <- (in1.lo * in2.lo)[31:0]; + __ Umull(out_lo, temp, in1_lo, in2_lo); + // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32] + __ Add(out_hi, out_hi, temp); + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Vmul(OutputVRegister(mul), InputVRegisterAt(mul, 0), InputVRegisterAt(mul, 1)); + break; + + default: + LOG(FATAL) << "Unexpected mul type " << mul->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* instruction) { + DCHECK(instruction->IsDiv() || instruction->IsRem()); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); + + Location second = instruction->GetLocations()->InAt(1); + DCHECK(second.IsConstant()); + + vixl32::Register out = OutputRegister(instruction); + vixl32::Register dividend = InputRegisterAt(instruction, 0); + int32_t imm = Int32ConstantFrom(second); + DCHECK(imm == 1 || imm == -1); + + if (instruction->IsRem()) { + __ Mov(out, 0); + } else { + if (imm == 1) { + __ Mov(out, dividend); + } else { + __ Rsb(out, dividend, 0); + } + } +} + +void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instruction) { + DCHECK(instruction->IsDiv() || instruction->IsRem()); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); + + LocationSummary* locations = 
instruction->GetLocations(); + Location second = locations->InAt(1); + DCHECK(second.IsConstant()); + + vixl32::Register out = OutputRegister(instruction); + vixl32::Register dividend = InputRegisterAt(instruction, 0); + vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); + int32_t imm = Int32ConstantFrom(second); + uint32_t abs_imm = static_cast(AbsOrMin(imm)); + int ctz_imm = CTZ(abs_imm); + + if (ctz_imm == 1) { + __ Lsr(temp, dividend, 32 - ctz_imm); + } else { + __ Asr(temp, dividend, 31); + __ Lsr(temp, temp, 32 - ctz_imm); + } + __ Add(out, temp, dividend); + + if (instruction->IsDiv()) { + __ Asr(out, out, ctz_imm); + if (imm < 0) { + __ Rsb(out, out, 0); + } + } else { + __ Ubfx(out, out, 0, ctz_imm); + __ Sub(out, out, temp); + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) { + DCHECK(instruction->IsDiv() || instruction->IsRem()); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); + + LocationSummary* locations = instruction->GetLocations(); + Location second = locations->InAt(1); + DCHECK(second.IsConstant()); + + vixl32::Register out = OutputRegister(instruction); + vixl32::Register dividend = InputRegisterAt(instruction, 0); + vixl32::Register temp1 = RegisterFrom(locations->GetTemp(0)); + vixl32::Register temp2 = RegisterFrom(locations->GetTemp(1)); + int32_t imm = Int32ConstantFrom(second); + + int64_t magic; + int shift; + CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift); + + // TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed. 
+ __ Mov(temp1, static_cast(magic)); + __ Smull(temp2, temp1, dividend, temp1); + + if (imm > 0 && magic < 0) { + __ Add(temp1, temp1, dividend); + } else if (imm < 0 && magic > 0) { + __ Sub(temp1, temp1, dividend); + } + + if (shift != 0) { + __ Asr(temp1, temp1, shift); + } + + if (instruction->IsDiv()) { + __ Sub(out, temp1, Operand(temp1, vixl32::Shift(ASR), 31)); + } else { + __ Sub(temp1, temp1, Operand(temp1, vixl32::Shift(ASR), 31)); + // TODO: Strength reduction for mls. + __ Mov(temp2, imm); + __ Mls(out, temp1, temp2, dividend); + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral( + HBinaryOperation* instruction) { + DCHECK(instruction->IsDiv() || instruction->IsRem()); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32); + + Location second = instruction->GetLocations()->InAt(1); + DCHECK(second.IsConstant()); + + int32_t imm = Int32ConstantFrom(second); + if (imm == 0) { + // Do not generate anything. DivZeroCheck would prevent any code to be executed. + } else if (imm == 1 || imm == -1) { + DivRemOneOrMinusOne(instruction); + } else if (IsPowerOfTwo(AbsOrMin(imm))) { + DivRemByPowerOfTwo(instruction); + } else { + DCHECK(imm <= -2 || imm >= 2); + GenerateDivRemWithAnyConstant(instruction); + } +} + +void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) { + LocationSummary::CallKind call_kind = LocationSummary::kNoCall; + if (div->GetResultType() == DataType::Type::kInt64) { + // pLdiv runtime call. + call_kind = LocationSummary::kCallOnMainOnly; + } else if (div->GetResultType() == DataType::Type::kInt32 && div->InputAt(1)->IsConstant()) { + // sdiv will be replaced by other instruction sequence. + } else if (div->GetResultType() == DataType::Type::kInt32 && + !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { + // pIdivmod runtime call. 
+ call_kind = LocationSummary::kCallOnMainOnly; + } + + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind); + + switch (div->GetResultType()) { + case DataType::Type::kInt32: { + if (div->InputAt(1)->IsConstant()) { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant())); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + int32_t value = Int32ConstantFrom(div->InputAt(1)); + if (value == 1 || value == 0 || value == -1) { + // No temp register required. + } else { + locations->AddTemp(Location::RequiresRegister()); + if (!IsPowerOfTwo(AbsOrMin(value))) { + locations->AddTemp(Location::RequiresRegister()); + } + } + } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } else { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); + // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but + // we only need the former. 
+ locations->SetOut(LocationFrom(r0)); + } + break; + } + case DataType::Type::kInt64: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom( + calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); + locations->SetInAt(1, LocationFrom( + calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3))); + locations->SetOut(LocationFrom(r0, r1)); + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + } + + default: + LOG(FATAL) << "Unexpected div type " << div->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) { + Location lhs = div->GetLocations()->InAt(0); + Location rhs = div->GetLocations()->InAt(1); + + switch (div->GetResultType()) { + case DataType::Type::kInt32: { + if (rhs.IsConstant()) { + GenerateDivRemConstantIntegral(div); + } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { + __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1)); + } else { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + DCHECK(calling_convention.GetRegisterAt(0).Is(RegisterFrom(lhs))); + DCHECK(calling_convention.GetRegisterAt(1).Is(RegisterFrom(rhs))); + DCHECK(r0.Is(OutputRegister(div))); + + codegen_->InvokeRuntime(kQuickIdivmod, div, div->GetDexPc()); + CheckEntrypointTypes(); + } + break; + } + + case DataType::Type::kInt64: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + DCHECK(calling_convention.GetRegisterAt(0).Is(LowRegisterFrom(lhs))); + DCHECK(calling_convention.GetRegisterAt(1).Is(HighRegisterFrom(lhs))); + DCHECK(calling_convention.GetRegisterAt(2).Is(LowRegisterFrom(rhs))); + DCHECK(calling_convention.GetRegisterAt(3).Is(HighRegisterFrom(rhs))); + 
DCHECK(LowRegisterFrom(div->GetLocations()->Out()).Is(r0)); + DCHECK(HighRegisterFrom(div->GetLocations()->Out()).Is(r1)); + + codegen_->InvokeRuntime(kQuickLdiv, div, div->GetDexPc()); + CheckEntrypointTypes(); + break; + } + + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Vdiv(OutputVRegister(div), InputVRegisterAt(div, 0), InputVRegisterAt(div, 1)); + break; + + default: + LOG(FATAL) << "Unexpected div type " << div->GetResultType(); + } +} + +void LocationsBuilderARMVIXL::VisitRem(HRem* rem) { + DataType::Type type = rem->GetResultType(); + + // Most remainders are implemented in the runtime. + LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly; + if (rem->GetResultType() == DataType::Type::kInt32 && rem->InputAt(1)->IsConstant()) { + // sdiv will be replaced by other instruction sequence. + call_kind = LocationSummary::kNoCall; + } else if ((rem->GetResultType() == DataType::Type::kInt32) + && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { + // Have hardware divide instruction for int, do it with three instructions. + call_kind = LocationSummary::kNoCall; + } + + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind); + + switch (type) { + case DataType::Type::kInt32: { + if (rem->InputAt(1)->IsConstant()) { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant())); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + int32_t value = Int32ConstantFrom(rem->InputAt(1)); + if (value == 1 || value == 0 || value == -1) { + // No temp register required. 
+ } else { + locations->AddTemp(Location::RequiresRegister()); + if (!IsPowerOfTwo(AbsOrMin(value))) { + locations->AddTemp(Location::RequiresRegister()); + } + } + } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + locations->AddTemp(Location::RequiresRegister()); + } else { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); + // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but + // we only need the latter. + locations->SetOut(LocationFrom(r1)); + } + break; + } + case DataType::Type::kInt64: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom( + calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); + locations->SetInAt(1, LocationFrom( + calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3))); + // The runtime helper puts the output in R2,R3. 
+ locations->SetOut(LocationFrom(r2, r3)); + break; + } + case DataType::Type::kFloat32: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); + locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1))); + locations->SetOut(LocationFrom(s0)); + break; + } + + case DataType::Type::kFloat64: { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom( + calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1))); + locations->SetInAt(1, LocationFrom( + calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3))); + locations->SetOut(LocationFrom(s0, s1)); + break; + } + + default: + LOG(FATAL) << "Unexpected rem type " << type; + } +} + +void InstructionCodeGeneratorARMVIXL::VisitRem(HRem* rem) { + LocationSummary* locations = rem->GetLocations(); + Location second = locations->InAt(1); + + DataType::Type type = rem->GetResultType(); + switch (type) { + case DataType::Type::kInt32: { + vixl32::Register reg1 = InputRegisterAt(rem, 0); + vixl32::Register out_reg = OutputRegister(rem); + if (second.IsConstant()) { + GenerateDivRemConstantIntegral(rem); + } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { + vixl32::Register reg2 = RegisterFrom(second); + vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); + + // temp = reg1 / reg2 (integer division) + // dest = reg1 - temp * reg2 + __ Sdiv(temp, reg1, reg2); + __ Mls(out_reg, temp, reg2, reg1); + } else { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + DCHECK(reg1.Is(calling_convention.GetRegisterAt(0))); + DCHECK(RegisterFrom(second).Is(calling_convention.GetRegisterAt(1))); + DCHECK(out_reg.Is(r1)); + + codegen_->InvokeRuntime(kQuickIdivmod, rem, rem->GetDexPc()); + CheckEntrypointTypes(); + } + break; + } + + case DataType::Type::kInt64: { + codegen_->InvokeRuntime(kQuickLmod, rem, 
rem->GetDexPc());
+      CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
+      break;
+    }
+
+    case DataType::Type::kFloat32: {
+      codegen_->InvokeRuntime(kQuickFmodf, rem, rem->GetDexPc());
+      CheckEntrypointTypes<kQuickFmodf, float, float, float>();
+      break;
+    }
+
+    case DataType::Type::kFloat64: {
+      codegen_->InvokeRuntime(kQuickFmod, rem, rem->GetDexPc());
+      CheckEntrypointTypes<kQuickFmod, double, double, double>();
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected rem type " << type;
+  }
+}
+
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    case DataType::Type::kFloat32:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      locations->AddTemp(Location::RequiresRegister());
+      break;
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMinMaxInt(LocationSummary* locations, bool is_min) {
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+
+  vixl32::Register op1 = RegisterFrom(op1_loc);
+  vixl32::Register op2 = RegisterFrom(op2_loc);
+  vixl32::Register out = RegisterFrom(out_loc);
+
+  __ Cmp(op1, op2);
+
+  {
+    
ExactAssemblyScope aas(GetVIXLAssembler(), + 3 * kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + + __ ite(is_min ? lt : gt); + __ mov(is_min ? lt : gt, out, op1); + __ mov(is_min ? ge : le, out, op2); + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateMinMaxLong(LocationSummary* locations, bool is_min) { + Location op1_loc = locations->InAt(0); + Location op2_loc = locations->InAt(1); + Location out_loc = locations->Out(); + + // Optimization: don't generate any code if inputs are the same. + if (op1_loc.Equals(op2_loc)) { + DCHECK(out_loc.Equals(op1_loc)); // out_loc is set as SameAsFirstInput() in location builder. + return; + } + + vixl32::Register op1_lo = LowRegisterFrom(op1_loc); + vixl32::Register op1_hi = HighRegisterFrom(op1_loc); + vixl32::Register op2_lo = LowRegisterFrom(op2_loc); + vixl32::Register op2_hi = HighRegisterFrom(op2_loc); + vixl32::Register out_lo = LowRegisterFrom(out_loc); + vixl32::Register out_hi = HighRegisterFrom(out_loc); + UseScratchRegisterScope temps(GetVIXLAssembler()); + const vixl32::Register temp = temps.Acquire(); + + DCHECK(op1_lo.Is(out_lo)); + DCHECK(op1_hi.Is(out_hi)); + + // Compare op1 >= op2, or op1 < op2. + __ Cmp(out_lo, op2_lo); + __ Sbcs(temp, out_hi, op2_hi); + + // Now GE/LT condition code is correct for the long comparison. + { + vixl32::ConditionType cond = is_min ? ge : lt; + ExactAssemblyScope it_scope(GetVIXLAssembler(), + 3 * kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ itt(cond); + __ mov(cond, out_lo, op2_lo); + __ mov(cond, out_hi, op2_hi); + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax, bool is_min) { + LocationSummary* locations = minmax->GetLocations(); + Location op1_loc = locations->InAt(0); + Location op2_loc = locations->InAt(1); + Location out_loc = locations->Out(); + + // Optimization: don't generate any code if inputs are the same. 
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+    return;
+  }
+
+  vixl32::SRegister op1 = SRegisterFrom(op1_loc);
+  vixl32::SRegister op2 = SRegisterFrom(op2_loc);
+  vixl32::SRegister out = SRegisterFrom(out_loc);
+
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  const vixl32::Register temp1 = temps.Acquire();
+  vixl32::Register temp2 = RegisterFrom(locations->GetTemp(0));
+  vixl32::Label nan, done;
+  vixl32::Label* final_label = codegen_->GetFinalLabel(minmax, &done);
+
+  DCHECK(op1.Is(out));
+
+  __ Vcmp(op1, op2);
+  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+  __ B(vs, &nan, /* is_far_target= */ false);  // if un-ordered, go to NaN handling.
+
+  // op1 <> op2
+  vixl32::ConditionType cond = is_min ? gt : lt;
+  {
+    ExactAssemblyScope it_scope(GetVIXLAssembler(),
+                                2 * kMaxInstructionSizeInBytes,
+                                CodeBufferCheckScope::kMaximumSize);
+    __ it(cond);
+    __ vmov(cond, F32, out, op2);
+  }
+  // for <>(not equal), we've done min/max calculation.
+  __ B(ne, final_label, /* is_far_target= */ false);
+
+  // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
+  __ Vmov(temp1, op1);
+  __ Vmov(temp2, op2);
+  if (is_min) {
+    __ Orr(temp1, temp1, temp2);
+  } else {
+    __ And(temp1, temp1, temp2);
+  }
+  __ Vmov(out, temp1);
+  __ B(final_label);
+
+  // handle NaN input.
+  __ Bind(&nan);
+  __ Movt(temp1, High16Bits(kNanFloat));  // 0x7FC0xxxx is a NaN.
+  __ Vmov(out, temp1);
+
+  if (done.IsReferenced()) {
+    __ Bind(&done);
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax, bool is_min) {
+  LocationSummary* locations = minmax->GetLocations();
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+
+  // Optimization: don't generate any code if inputs are the same.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+ return; + } + + vixl32::DRegister op1 = DRegisterFrom(op1_loc); + vixl32::DRegister op2 = DRegisterFrom(op2_loc); + vixl32::DRegister out = DRegisterFrom(out_loc); + vixl32::Label handle_nan_eq, done; + vixl32::Label* final_label = codegen_->GetFinalLabel(minmax, &done); + + DCHECK(op1.Is(out)); + + __ Vcmp(op1, op2); + __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); + __ B(vs, &handle_nan_eq, /* is_far_target= */ false); // if un-ordered, go to NaN handling. + + // op1 <> op2 + vixl32::ConditionType cond = is_min ? gt : lt; + { + ExactAssemblyScope it_scope(GetVIXLAssembler(), + 2 * kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ it(cond); + __ vmov(cond, F64, out, op2); + } + // for <>(not equal), we've done min/max calculation. + __ B(ne, final_label, /* is_far_target= */ false); + + // handle op1 == op2, max(+0.0,-0.0). + if (!is_min) { + __ Vand(F64, out, op1, op2); + __ B(final_label); + } + + // handle op1 == op2, min(+0.0,-0.0), NaN input. + __ Bind(&handle_nan_eq); + __ Vorr(F64, out, op1, op2); // assemble op1/-0.0/NaN. 
+ + if (done.IsReferenced()) { + __ Bind(&done); + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateMinMax(HBinaryOperation* minmax, bool is_min) { + DataType::Type type = minmax->GetResultType(); + switch (type) { + case DataType::Type::kInt32: + GenerateMinMaxInt(minmax->GetLocations(), is_min); + break; + case DataType::Type::kInt64: + GenerateMinMaxLong(minmax->GetLocations(), is_min); + break; + case DataType::Type::kFloat32: + GenerateMinMaxFloat(minmax, is_min); + break; + case DataType::Type::kFloat64: + GenerateMinMaxDouble(minmax, is_min); + break; + default: + LOG(FATAL) << "Unexpected type for HMinMax " << type; + } +} + +void LocationsBuilderARMVIXL::VisitMin(HMin* min) { + CreateMinMaxLocations(GetGraph()->GetAllocator(), min); +} + +void InstructionCodeGeneratorARMVIXL::VisitMin(HMin* min) { + GenerateMinMax(min, /*is_min*/ true); +} + +void LocationsBuilderARMVIXL::VisitMax(HMax* max) { + CreateMinMaxLocations(GetGraph()->GetAllocator(), max); +} + +void InstructionCodeGeneratorARMVIXL::VisitMax(HMax* max) { + GenerateMinMax(max, /*is_min*/ false); +} + +void LocationsBuilderARMVIXL::VisitAbs(HAbs* abs) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs); + switch (abs->GetResultType()) { + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + locations->AddTemp(Location::RequiresRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unexpected type for abs operation " << abs->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitAbs(HAbs* abs) { + LocationSummary* locations = abs->GetLocations(); + switch (abs->GetResultType()) { + case 
DataType::Type::kInt32: { + vixl32::Register in_reg = RegisterFrom(locations->InAt(0)); + vixl32::Register out_reg = RegisterFrom(locations->Out()); + vixl32::Register mask = RegisterFrom(locations->GetTemp(0)); + __ Asr(mask, in_reg, 31); + __ Add(out_reg, in_reg, mask); + __ Eor(out_reg, out_reg, mask); + break; + } + case DataType::Type::kInt64: { + Location in = locations->InAt(0); + vixl32::Register in_reg_lo = LowRegisterFrom(in); + vixl32::Register in_reg_hi = HighRegisterFrom(in); + Location output = locations->Out(); + vixl32::Register out_reg_lo = LowRegisterFrom(output); + vixl32::Register out_reg_hi = HighRegisterFrom(output); + DCHECK(!out_reg_lo.Is(in_reg_hi)) << "Diagonal overlap unexpected."; + vixl32::Register mask = RegisterFrom(locations->GetTemp(0)); + __ Asr(mask, in_reg_hi, 31); + __ Adds(out_reg_lo, in_reg_lo, mask); + __ Adc(out_reg_hi, in_reg_hi, mask); + __ Eor(out_reg_lo, out_reg_lo, mask); + __ Eor(out_reg_hi, out_reg_hi, mask); + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Vabs(OutputVRegister(abs), InputVRegisterAt(abs, 0)); + break; + default: + LOG(FATAL) << "Unexpected type for abs operation " << abs->GetResultType(); + } +} + +void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) { + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); + locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); +} + +void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) { + DivZeroCheckSlowPathARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathARMVIXL(instruction); + codegen_->AddSlowPath(slow_path); + + LocationSummary* locations = instruction->GetLocations(); + Location value = locations->InAt(0); + + switch (instruction->GetType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case 
DataType::Type::kInt16: + case DataType::Type::kInt32: { + if (value.IsRegister()) { + __ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel()); + } else { + DCHECK(value.IsConstant()) << value; + if (Int32ConstantFrom(value) == 0) { + __ B(slow_path->GetEntryLabel()); + } + } + break; + } + case DataType::Type::kInt64: { + if (value.IsRegisterPair()) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Orrs(temp, LowRegisterFrom(value), HighRegisterFrom(value)); + __ B(eq, slow_path->GetEntryLabel()); + } else { + DCHECK(value.IsConstant()) << value; + if (Int64ConstantFrom(value) == 0) { + __ B(slow_path->GetEntryLabel()); + } + } + break; + } + default: + LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType(); + } +} + +void InstructionCodeGeneratorARMVIXL::HandleIntegerRotate(HRor* ror) { + LocationSummary* locations = ror->GetLocations(); + vixl32::Register in = InputRegisterAt(ror, 0); + Location rhs = locations->InAt(1); + vixl32::Register out = OutputRegister(ror); + + if (rhs.IsConstant()) { + // Arm32 and Thumb2 assemblers require a rotation on the interval [1,31], + // so map all rotations to a +ve. equivalent in that range. + // (e.g. left *or* right by -2 bits == 30 bits in the same direction.) + uint32_t rot = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()) & 0x1F; + if (rot) { + // Rotate, mapping left rotations to right equivalents if necessary. + // (e.g. left by 2 bits == right by 30.) + __ Ror(out, in, rot); + } else if (!out.Is(in)) { + __ Mov(out, in); + } + } else { + __ Ror(out, in, RegisterFrom(rhs)); + } +} + +// Gain some speed by mapping all Long rotates onto equivalent pairs of Integer +// rotates by swapping input regs (effectively rotating by the first 32-bits of +// a larger rotation) or flipping direction (thus treating larger right/left +// rotations as sub-word sized rotations in the other direction) as appropriate. 
+void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) { + LocationSummary* locations = ror->GetLocations(); + vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0)); + vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0)); + Location rhs = locations->InAt(1); + vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out()); + vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out()); + + if (rhs.IsConstant()) { + uint64_t rot = CodeGenerator::GetInt64ValueOf(rhs.GetConstant()); + // Map all rotations to +ve. equivalents on the interval [0,63]. + rot &= kMaxLongShiftDistance; + // For rotates over a word in size, 'pre-rotate' by 32-bits to keep rotate + // logic below to a simple pair of binary orr. + // (e.g. 34 bits == in_reg swap + 2 bits right.) + if (rot >= kArmBitsPerWord) { + rot -= kArmBitsPerWord; + std::swap(in_reg_hi, in_reg_lo); + } + // Rotate, or mov to out for zero or word size rotations. + if (rot != 0u) { + __ Lsr(out_reg_hi, in_reg_hi, Operand::From(rot)); + __ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot)); + __ Lsr(out_reg_lo, in_reg_lo, Operand::From(rot)); + __ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot)); + } else { + __ Mov(out_reg_lo, in_reg_lo); + __ Mov(out_reg_hi, in_reg_hi); + } + } else { + vixl32::Register shift_right = RegisterFrom(locations->GetTemp(0)); + vixl32::Register shift_left = RegisterFrom(locations->GetTemp(1)); + vixl32::Label end; + vixl32::Label shift_by_32_plus_shift_right; + vixl32::Label* final_label = codegen_->GetFinalLabel(ror, &end); + + __ And(shift_right, RegisterFrom(rhs), 0x1F); + __ Lsrs(shift_left, RegisterFrom(rhs), 6); + __ Rsb(LeaveFlags, shift_left, shift_right, Operand::From(kArmBitsPerWord)); + __ B(cc, &shift_by_32_plus_shift_right, /* is_far_target= */ false); + + // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right). 
+ // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right). + __ Lsl(out_reg_hi, in_reg_hi, shift_left); + __ Lsr(out_reg_lo, in_reg_lo, shift_right); + __ Add(out_reg_hi, out_reg_hi, out_reg_lo); + __ Lsl(out_reg_lo, in_reg_lo, shift_left); + __ Lsr(shift_left, in_reg_hi, shift_right); + __ Add(out_reg_lo, out_reg_lo, shift_left); + __ B(final_label); + + __ Bind(&shift_by_32_plus_shift_right); // Shift by 32+shift_right. + // out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left). + // out_reg_lo = (reg_lo >> shift_right) | (reg_hi << shift_left). + __ Lsr(out_reg_hi, in_reg_hi, shift_right); + __ Lsl(out_reg_lo, in_reg_lo, shift_left); + __ Add(out_reg_hi, out_reg_hi, out_reg_lo); + __ Lsr(out_reg_lo, in_reg_lo, shift_right); + __ Lsl(shift_right, in_reg_hi, shift_left); + __ Add(out_reg_lo, out_reg_lo, shift_right); + + if (end.IsReferenced()) { + __ Bind(&end); + } + } +} + +void LocationsBuilderARMVIXL::VisitRor(HRor* ror) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall); + switch (ror->GetResultType()) { + case DataType::Type::kInt32: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(ror->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + } + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + if (ror->InputAt(1)->IsConstant()) { + locations->SetInAt(1, Location::ConstantLocation(ror->InputAt(1)->AsConstant())); + } else { + locations->SetInAt(1, Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + } + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); + break; + } + default: + LOG(FATAL) << "Unexpected operation type " << ror->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitRor(HRor* ror) { + 
DataType::Type type = ror->GetResultType(); + switch (type) { + case DataType::Type::kInt32: { + HandleIntegerRotate(ror); + break; + } + case DataType::Type::kInt64: { + HandleLongRotate(ror); + break; + } + default: + LOG(FATAL) << "Unexpected operation type " << type; + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::HandleShift(HBinaryOperation* op) { + DCHECK(op->IsShl() || op->IsShr() || op->IsUShr()); + + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall); + + switch (op->GetResultType()) { + case DataType::Type::kInt32: { + locations->SetInAt(0, Location::RequiresRegister()); + if (op->InputAt(1)->IsConstant()) { + locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant())); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } else { + locations->SetInAt(1, Location::RequiresRegister()); + // Make the output overlap, as it will be used to hold the masked + // second input. + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); + } + break; + } + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + if (op->InputAt(1)->IsConstant()) { + locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant())); + // For simplicity, use kOutputOverlap even though we only require that low registers + // don't clash with high registers which the register allocator currently guarantees. 
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); + } else { + locations->SetInAt(1, Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); + } + break; + } + default: + LOG(FATAL) << "Unexpected operation type " << op->GetResultType(); + } +} + +void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) { + DCHECK(op->IsShl() || op->IsShr() || op->IsUShr()); + + LocationSummary* locations = op->GetLocations(); + Location out = locations->Out(); + Location first = locations->InAt(0); + Location second = locations->InAt(1); + + DataType::Type type = op->GetResultType(); + switch (type) { + case DataType::Type::kInt32: { + vixl32::Register out_reg = OutputRegister(op); + vixl32::Register first_reg = InputRegisterAt(op, 0); + if (second.IsRegister()) { + vixl32::Register second_reg = RegisterFrom(second); + // ARM doesn't mask the shift count so we need to do it ourselves. + __ And(out_reg, second_reg, kMaxIntShiftDistance); + if (op->IsShl()) { + __ Lsl(out_reg, first_reg, out_reg); + } else if (op->IsShr()) { + __ Asr(out_reg, first_reg, out_reg); + } else { + __ Lsr(out_reg, first_reg, out_reg); + } + } else { + int32_t cst = Int32ConstantFrom(second); + uint32_t shift_value = cst & kMaxIntShiftDistance; + if (shift_value == 0) { // ARM does not support shifting with 0 immediate. 
+ __ Mov(out_reg, first_reg); + } else if (op->IsShl()) { + __ Lsl(out_reg, first_reg, shift_value); + } else if (op->IsShr()) { + __ Asr(out_reg, first_reg, shift_value); + } else { + __ Lsr(out_reg, first_reg, shift_value); + } + } + break; + } + case DataType::Type::kInt64: { + vixl32::Register o_h = HighRegisterFrom(out); + vixl32::Register o_l = LowRegisterFrom(out); + + vixl32::Register high = HighRegisterFrom(first); + vixl32::Register low = LowRegisterFrom(first); + + if (second.IsRegister()) { + vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); + + vixl32::Register second_reg = RegisterFrom(second); + + if (op->IsShl()) { + __ And(o_l, second_reg, kMaxLongShiftDistance); + // Shift the high part + __ Lsl(o_h, high, o_l); + // Shift the low part and `or` what overflew on the high part + __ Rsb(temp, o_l, Operand::From(kArmBitsPerWord)); + __ Lsr(temp, low, temp); + __ Orr(o_h, o_h, temp); + // If the shift is > 32 bits, override the high part + __ Subs(temp, o_l, Operand::From(kArmBitsPerWord)); + { + ExactAssemblyScope guard(GetVIXLAssembler(), + 2 * vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ it(pl); + __ lsl(pl, o_h, low, temp); + } + // Shift the low part + __ Lsl(o_l, low, o_l); + } else if (op->IsShr()) { + __ And(o_h, second_reg, kMaxLongShiftDistance); + // Shift the low part + __ Lsr(o_l, low, o_h); + // Shift the high part and `or` what underflew on the low part + __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord)); + __ Lsl(temp, high, temp); + __ Orr(o_l, o_l, temp); + // If the shift is > 32 bits, override the low part + __ Subs(temp, o_h, Operand::From(kArmBitsPerWord)); + { + ExactAssemblyScope guard(GetVIXLAssembler(), + 2 * vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ it(pl); + __ asr(pl, o_l, high, temp); + } + // Shift the high part + __ Asr(o_h, high, o_h); + } else { + __ And(o_h, second_reg, kMaxLongShiftDistance); + // same as Shr except we use `Lsr`s and 
not `Asr`s + __ Lsr(o_l, low, o_h); + __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord)); + __ Lsl(temp, high, temp); + __ Orr(o_l, o_l, temp); + __ Subs(temp, o_h, Operand::From(kArmBitsPerWord)); + { + ExactAssemblyScope guard(GetVIXLAssembler(), + 2 * vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ it(pl); + __ lsr(pl, o_l, high, temp); + } + __ Lsr(o_h, high, o_h); + } + } else { + // Register allocator doesn't create partial overlap. + DCHECK(!o_l.Is(high)); + DCHECK(!o_h.Is(low)); + int32_t cst = Int32ConstantFrom(second); + uint32_t shift_value = cst & kMaxLongShiftDistance; + if (shift_value > 32) { + if (op->IsShl()) { + __ Lsl(o_h, low, shift_value - 32); + __ Mov(o_l, 0); + } else if (op->IsShr()) { + __ Asr(o_l, high, shift_value - 32); + __ Asr(o_h, high, 31); + } else { + __ Lsr(o_l, high, shift_value - 32); + __ Mov(o_h, 0); + } + } else if (shift_value == 32) { + if (op->IsShl()) { + __ Mov(o_h, low); + __ Mov(o_l, 0); + } else if (op->IsShr()) { + __ Mov(o_l, high); + __ Asr(o_h, high, 31); + } else { + __ Mov(o_l, high); + __ Mov(o_h, 0); + } + } else if (shift_value == 1) { + if (op->IsShl()) { + __ Lsls(o_l, low, 1); + __ Adc(o_h, high, high); + } else if (op->IsShr()) { + __ Asrs(o_h, high, 1); + __ Rrx(o_l, low); + } else { + __ Lsrs(o_h, high, 1); + __ Rrx(o_l, low); + } + } else if (shift_value == 0) { + __ Mov(o_l, low); + __ Mov(o_h, high); + } else { + DCHECK(0 < shift_value && shift_value < 32) << shift_value; + if (op->IsShl()) { + __ Lsl(o_h, high, shift_value); + __ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value)); + __ Lsl(o_l, low, shift_value); + } else if (op->IsShr()) { + __ Lsr(o_l, low, shift_value); + __ Orr(o_l, o_l, Operand(high, ShiftType::LSL, 32 - shift_value)); + __ Asr(o_h, high, shift_value); + } else { + __ Lsr(o_l, low, shift_value); + __ Orr(o_l, o_l, Operand(high, ShiftType::LSL, 32 - shift_value)); + __ Lsr(o_h, high, shift_value); + } + } + } + break; + } + default: + 
LOG(FATAL) << "Unexpected operation type " << type;
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitShl(HShl* shl) {
+  HandleShift(shl);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitShl(HShl* shl) {
+  HandleShift(shl);
+}
+
+void LocationsBuilderARMVIXL::VisitShr(HShr* shr) {
+  HandleShift(shr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitShr(HShr* shr) {
+  HandleShift(shr);
+}
+
+void LocationsBuilderARMVIXL::VisitUShr(HUShr* ushr) {
+  HandleShift(ushr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUShr(HUShr* ushr) {
+  HandleShift(ushr);
+}
+
+void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+      instruction, LocationSummary::kCallOnMainOnly);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetOut(LocationFrom(r0));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
+  codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
+  CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 11);
+}
+
+void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+      instruction, LocationSummary::kCallOnMainOnly);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetOut(LocationFrom(r0));
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
+  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
+  codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
+  CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 12);
+}
+
+void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
+  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+  if (location.IsStackSlot()) {
+    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+  } else if (location.IsDoubleStackSlot()) {
+    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+  }
+  locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitParameterValue(
+    HParameterValue* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderARMVIXL::VisitCurrentMethod(HCurrentMethod* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetOut(LocationFrom(kMethodRegister));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitCurrentMethod(
+    HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, the method is already at its location.
+} + +void LocationsBuilderARMVIXL::VisitNot(HNot* not_) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) { + LocationSummary* locations = not_->GetLocations(); + Location out = locations->Out(); + Location in = locations->InAt(0); + switch (not_->GetResultType()) { + case DataType::Type::kInt32: + __ Mvn(OutputRegister(not_), InputRegisterAt(not_, 0)); + break; + + case DataType::Type::kInt64: + __ Mvn(LowRegisterFrom(out), LowRegisterFrom(in)); + __ Mvn(HighRegisterFrom(out), HighRegisterFrom(in)); + break; + + default: + LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType(); + } +} + +void LocationsBuilderARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) { + __ Eor(OutputRegister(bool_not), InputRegister(bool_not), 1); +} + +void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall); + switch (compare->InputAt(0)->GetType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + // Output overlaps because it is written before doing the low comparison. 
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, ArithmeticZeroOrFpuRegister(compare->InputAt(1))); + locations->SetOut(Location::RequiresRegister()); + break; + } + default: + LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) { + LocationSummary* locations = compare->GetLocations(); + vixl32::Register out = OutputRegister(compare); + Location left = locations->InAt(0); + Location right = locations->InAt(1); + + vixl32::Label less, greater, done; + vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done); + DataType::Type type = compare->InputAt(0)->GetType(); + vixl32::Condition less_cond = vixl32::Condition::None(); + switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { + // Emit move to `out` before the `Cmp`, as `Mov` might affect the status flags. + __ Mov(out, 0); + __ Cmp(RegisterFrom(left), RegisterFrom(right)); // Signed compare. + less_cond = lt; + break; + } + case DataType::Type::kInt64: { + __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); // Signed compare. + __ B(lt, &less, /* is_far_target= */ false); + __ B(gt, &greater, /* is_far_target= */ false); + // Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags. + __ Mov(out, 0); + __ Cmp(LowRegisterFrom(left), LowRegisterFrom(right)); // Unsigned compare. + less_cond = lo; + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: { + __ Mov(out, 0); + GenerateVcmp(compare, codegen_); + // To branch on the FP compare result we transfer FPSCR to APSR (encoded as PC in VMRS). 
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); + less_cond = ARMFPCondition(kCondLT, compare->IsGtBias()); + break; + } + default: + LOG(FATAL) << "Unexpected compare type " << type; + UNREACHABLE(); + } + + __ B(eq, final_label, /* is_far_target= */ false); + __ B(less_cond, &less, /* is_far_target= */ false); + + __ Bind(&greater); + __ Mov(out, 1); + __ B(final_label); + + __ Bind(&less); + __ Mov(out, -1); + + if (done.IsReferenced()) { + __ Bind(&done); + } +} + +void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) { + locations->SetInAt(i, Location::Any()); + } + locations->SetOut(Location::Any()); +} + +void InstructionCodeGeneratorARMVIXL::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) { + LOG(FATAL) << "Unreachable"; +} + +void CodeGeneratorARMVIXL::GenerateMemoryBarrier(MemBarrierKind kind) { + // TODO (ported from quick): revisit ARM barrier kinds. + DmbOptions flavor = DmbOptions::ISH; // Quiet C++ warnings. 
+ switch (kind) { + case MemBarrierKind::kAnyStore: + case MemBarrierKind::kLoadAny: + case MemBarrierKind::kAnyAny: { + flavor = DmbOptions::ISH; + break; + } + case MemBarrierKind::kStoreStore: { + flavor = DmbOptions::ISHST; + break; + } + default: + LOG(FATAL) << "Unexpected memory barrier " << kind; + } + __ Dmb(flavor); +} + +void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicLoad(vixl32::Register addr, + uint32_t offset, + vixl32::Register out_lo, + vixl32::Register out_hi) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + if (offset != 0) { + vixl32::Register temp = temps.Acquire(); + __ Add(temp, addr, offset); + addr = temp; + } + __ Ldrexd(out_lo, out_hi, MemOperand(addr)); +} + +void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register addr, + uint32_t offset, + vixl32::Register value_lo, + vixl32::Register value_hi, + vixl32::Register temp1, + vixl32::Register temp2, + HInstruction* instruction) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Label fail; + if (offset != 0) { + vixl32::Register temp = temps.Acquire(); + __ Add(temp, addr, offset); + addr = temp; + } + __ Bind(&fail); + { + // Ensure the pc position is recorded immediately after the `ldrexd` instruction. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + // We need a load followed by store. (The address used in a STREX instruction must + // be the same as the address in the most recently executed LDREX instruction.) 
+ __ ldrexd(temp1, temp2, MemOperand(addr)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + __ Strexd(temp1, value_lo, value_hi, MemOperand(addr)); + __ CompareAndBranchIfNonZero(temp1, &fail); +} + +void LocationsBuilderARMVIXL::HandleFieldSet( + HInstruction* instruction, const FieldInfo& field_info) { + DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); + + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + + DataType::Type field_type = field_info.GetFieldType(); + if (DataType::IsFloatingPointType(field_type)) { + locations->SetInAt(1, Location::RequiresFpuRegister()); + } else { + locations->SetInAt(1, Location::RequiresRegister()); + } + + bool is_wide = field_type == DataType::Type::kInt64 || field_type == DataType::Type::kFloat64; + bool generate_volatile = field_info.IsVolatile() + && is_wide + && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); + // Temporary registers for the write barrier. + // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark. + if (needs_write_barrier) { + locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. + locations->AddTemp(Location::RequiresRegister()); + } else if (generate_volatile) { + // ARM encoding have some additional constraints for ldrexd/strexd: + // - registers need to be consecutive + // - the first register should be even but not R14. + // We don't test for ARM yet, and the assertion makes sure that we + // revisit this if we ever enable ARM encoding. 
+ DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet()); + + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + if (field_type == DataType::Type::kFloat64) { + // For doubles we need two more registers to copy the value. + locations->AddTemp(LocationFrom(r2)); + locations->AddTemp(LocationFrom(r3)); + } + } +} + +void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction, + const FieldInfo& field_info, + bool value_can_be_null) { + DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); + + LocationSummary* locations = instruction->GetLocations(); + vixl32::Register base = InputRegisterAt(instruction, 0); + Location value = locations->InAt(1); + + bool is_volatile = field_info.IsVolatile(); + bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); + DataType::Type field_type = field_info.GetFieldType(); + uint32_t offset = field_info.GetFieldOffset().Uint32Value(); + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); + + if (is_volatile) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore); + } + + switch (field_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + StoreOperandType operand_type = GetStoreOperandType(field_type); + GetAssembler()->StoreToOffset(operand_type, RegisterFrom(value), base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kReference: { + vixl32::Register value_reg = RegisterFrom(value); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(field_type, DataType::Type::kReference); + value_reg = RegisterFrom(locations->GetTemp(0)); + __ Mov(value_reg, RegisterFrom(value)); + GetAssembler()->PoisonHeapReference(value_reg); + } + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->StoreToOffset(kStoreWord, value_reg, base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kInt64: { + if (is_volatile && !atomic_ldrd_strd) { + GenerateWideAtomicStore(base, + offset, + LowRegisterFrom(value), + HighRegisterFrom(value), + RegisterFrom(locations->GetTemp(0)), + RegisterFrom(locations->GetTemp(1)), + instruction); + } else { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + break; + } + + case DataType::Type::kFloat32: { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->StoreSToOffset(SRegisterFrom(value), base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kFloat64: { + vixl32::DRegister value_reg = DRegisterFrom(value); + if (is_volatile && !atomic_ldrd_strd) { + vixl32::Register value_reg_lo = RegisterFrom(locations->GetTemp(0)); + vixl32::Register value_reg_hi = RegisterFrom(locations->GetTemp(1)); + + __ Vmov(value_reg_lo, value_reg_hi, value_reg); + + GenerateWideAtomicStore(base, + offset, + value_reg_lo, + value_reg_hi, + RegisterFrom(locations->GetTemp(2)), + RegisterFrom(locations->GetTemp(3)), + instruction); + } else { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->StoreDToOffset(value_reg, base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + break; + } + + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << field_type; + UNREACHABLE(); + } + + if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { + vixl32::Register temp = RegisterFrom(locations->GetTemp(0)); + vixl32::Register card = RegisterFrom(locations->GetTemp(1)); + codegen_->MarkGCCard(temp, card, base, RegisterFrom(value), value_can_be_null); + } + + if (is_volatile) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny); + } +} + +void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction, + const FieldInfo& field_info) { + DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); + + bool object_field_get_with_read_barrier = + kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) 
LocationSummary(instruction, + object_field_get_with_read_barrier + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); + if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. + } + locations->SetInAt(0, Location::RequiresRegister()); + + bool volatile_for_double = field_info.IsVolatile() + && (field_info.GetFieldType() == DataType::Type::kFloat64) + && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); + // The output overlaps in case of volatile long: we don't want the + // code generated by GenerateWideAtomicLoad to overwrite the + // object's location. Likewise, in the case of an object field get + // with read barriers enabled, we do not want the load to overwrite + // the object's location, as we need it to emit the read barrier. + bool overlap = + (field_info.IsVolatile() && (field_info.GetFieldType() == DataType::Type::kInt64)) || + object_field_get_with_read_barrier; + + if (DataType::IsFloatingPointType(instruction->GetType())) { + locations->SetOut(Location::RequiresFpuRegister()); + } else { + locations->SetOut(Location::RequiresRegister(), + (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap)); + } + if (volatile_for_double) { + // ARM encoding have some additional constraints for ldrexd/strexd: + // - registers need to be consecutive + // - the first register should be even but not R14. + // We don't test for ARM yet, and the assertion makes sure that we + // revisit this if we ever enable ARM encoding. 
+ DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet()); + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + } else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { + // We need a temporary register for the read barrier load in + // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier() + // only if the offset is too big. + if (field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) { + locations->AddTemp(Location::RequiresRegister()); + } + } +} + +Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* input) { + DCHECK(DataType::IsFloatingPointType(input->GetType())) << input->GetType(); + if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) || + (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) { + return Location::ConstantLocation(input->AsConstant()); + } else { + return Location::RequiresFpuRegister(); + } +} + +Location LocationsBuilderARMVIXL::ArmEncodableConstantOrRegister(HInstruction* constant, + Opcode opcode) { + DCHECK(!DataType::IsFloatingPointType(constant->GetType())); + if (constant->IsConstant() && + CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) { + return Location::ConstantLocation(constant->AsConstant()); + } + return Location::RequiresRegister(); +} + +static bool CanEncode32BitConstantAsImmediate( + CodeGeneratorARMVIXL* codegen, + uint32_t value, + Opcode opcode, + vixl32::FlagsUpdate flags_update = vixl32::FlagsUpdate::DontCare) { + ArmVIXLAssembler* assembler = codegen->GetAssembler(); + if (assembler->ShifterOperandCanHold(opcode, value, flags_update)) { + return true; + } + Opcode neg_opcode = kNoOperand; + uint32_t neg_value = 0; + switch (opcode) { + case AND: neg_opcode = BIC; neg_value = ~value; break; + case ORR: neg_opcode = ORN; neg_value = ~value; break; + case ADD: neg_opcode = SUB; neg_value = -value; break; + case ADC: 
neg_opcode = SBC; neg_value = ~value; break; + case SUB: neg_opcode = ADD; neg_value = -value; break; + case SBC: neg_opcode = ADC; neg_value = ~value; break; + case MOV: neg_opcode = MVN; neg_value = ~value; break; + default: + return false; + } + + if (assembler->ShifterOperandCanHold(neg_opcode, neg_value, flags_update)) { + return true; + } + + return opcode == AND && IsPowerOfTwo(value + 1); +} + +bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode) { + uint64_t value = static_cast(Int64FromConstant(input_cst)); + if (DataType::Is64BitType(input_cst->GetType())) { + Opcode high_opcode = opcode; + vixl32::FlagsUpdate low_flags_update = vixl32::FlagsUpdate::DontCare; + switch (opcode) { + case SUB: + // Flip the operation to an ADD. + value = -value; + opcode = ADD; + FALLTHROUGH_INTENDED; + case ADD: + if (Low32Bits(value) == 0u) { + return CanEncode32BitConstantAsImmediate(codegen_, High32Bits(value), opcode); + } + high_opcode = ADC; + low_flags_update = vixl32::FlagsUpdate::SetFlags; + break; + default: + break; + } + return CanEncode32BitConstantAsImmediate(codegen_, High32Bits(value), high_opcode) && + CanEncode32BitConstantAsImmediate(codegen_, Low32Bits(value), opcode, low_flags_update); + } else { + return CanEncode32BitConstantAsImmediate(codegen_, Low32Bits(value), opcode); + } +} + +void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, + const FieldInfo& field_info) { + DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); + + LocationSummary* locations = instruction->GetLocations(); + vixl32::Register base = InputRegisterAt(instruction, 0); + Location out = locations->Out(); + bool is_volatile = field_info.IsVolatile(); + bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); + DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType())); + DataType::Type load_type = instruction->GetType(); + 
uint32_t offset = field_info.GetFieldOffset().Uint32Value(); + + switch (load_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + LoadOperandType operand_type = GetLoadOperandType(load_type); + GetAssembler()->LoadFromOffset(operand_type, RegisterFrom(out), base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kReference: { + // /* HeapReference */ out = *(base + offset) + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + Location maybe_temp = (locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location(); + // Note that a potential implicit null check is handled in this + // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call. + codegen_->GenerateFieldLoadWithBakerReadBarrier( + instruction, out, base, offset, maybe_temp, /* needs_null_check= */ true); + if (is_volatile) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); + } + } else { + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + if (is_volatile) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); + } + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). 
+ codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, locations->InAt(0), offset); + } + break; + } + + case DataType::Type::kInt64: { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + if (is_volatile && !atomic_ldrd_strd) { + GenerateWideAtomicLoad(base, offset, LowRegisterFrom(out), HighRegisterFrom(out)); + } else { + GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out), base, offset); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kFloat32: { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->LoadSFromOffset(SRegisterFrom(out), base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kFloat64: { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + vixl32::DRegister out_dreg = DRegisterFrom(out); + if (is_volatile && !atomic_ldrd_strd) { + vixl32::Register lo = RegisterFrom(locations->GetTemp(0)); + vixl32::Register hi = RegisterFrom(locations->GetTemp(1)); + GenerateWideAtomicLoad(base, offset, lo, hi); + codegen_->MaybeRecordImplicitNullCheck(instruction); + __ Vmov(out_dreg, lo, hi); + } else { + GetAssembler()->LoadDFromOffset(out_dreg, base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + break; + } + + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << load_type; + UNREACHABLE(); + } + + if (is_volatile) { + if (load_type == DataType::Type::kReference) { + // Memory barriers, in the case of references, are also handled + // in the previous switch statement. 
+ } else { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); + } + } +} + +void LocationsBuilderARMVIXL::VisitInstanceFieldSet(HInstanceFieldSet* instruction) { + HandleFieldSet(instruction, instruction->GetFieldInfo()); +} + +void InstructionCodeGeneratorARMVIXL::VisitInstanceFieldSet(HInstanceFieldSet* instruction) { + HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull()); +} + +void LocationsBuilderARMVIXL::VisitInstanceFieldGet(HInstanceFieldGet* instruction) { + HandleFieldGet(instruction, instruction->GetFieldInfo()); +} + +void InstructionCodeGeneratorARMVIXL::VisitInstanceFieldGet(HInstanceFieldGet* instruction) { + HandleFieldGet(instruction, instruction->GetFieldInfo()); +} + +void LocationsBuilderARMVIXL::VisitStaticFieldGet(HStaticFieldGet* instruction) { + HandleFieldGet(instruction, instruction->GetFieldInfo()); +} + +void InstructionCodeGeneratorARMVIXL::VisitStaticFieldGet(HStaticFieldGet* instruction) { + HandleFieldGet(instruction, instruction->GetFieldInfo()); +} + +void LocationsBuilderARMVIXL::VisitStaticFieldSet(HStaticFieldSet* instruction) { + HandleFieldSet(instruction, instruction->GetFieldInfo()); +} + +void InstructionCodeGeneratorARMVIXL::VisitStaticFieldSet(HStaticFieldSet* instruction) { + HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull()); +} + +void LocationsBuilderARMVIXL::VisitStringBuilderAppend(HStringBuilderAppend* instruction) { + codegen_->CreateStringBuilderAppendLocations(instruction, LocationFrom(r0)); +} + +void InstructionCodeGeneratorARMVIXL::VisitStringBuilderAppend(HStringBuilderAppend* instruction) { + __ Mov(r0, instruction->GetFormat()->GetValue()); + codegen_->InvokeRuntime(kQuickStringBuilderAppend, instruction, instruction->GetDexPc()); +} + +void LocationsBuilderARMVIXL::VisitUnresolvedInstanceFieldGet( + HUnresolvedInstanceFieldGet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + 
codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), calling_convention); +} + +void InstructionCodeGeneratorARMVIXL::VisitUnresolvedInstanceFieldGet( + HUnresolvedInstanceFieldGet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARMVIXL::VisitUnresolvedInstanceFieldSet( + HUnresolvedInstanceFieldSet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), calling_convention); +} + +void InstructionCodeGeneratorARMVIXL::VisitUnresolvedInstanceFieldSet( + HUnresolvedInstanceFieldSet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARMVIXL::VisitUnresolvedStaticFieldGet( + HUnresolvedStaticFieldGet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), calling_convention); +} + +void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldGet( + HUnresolvedStaticFieldGet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARMVIXL::VisitUnresolvedStaticFieldSet( + HUnresolvedStaticFieldSet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + codegen_->CreateUnresolvedFieldLocationSummary( + instruction, instruction->GetFieldType(), 
calling_convention); +} + +void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldSet( + HUnresolvedStaticFieldSet* instruction) { + FieldAccessCallingConventionARMVIXL calling_convention; + codegen_->GenerateUnresolvedFieldAccess(instruction, + instruction->GetFieldType(), + instruction->GetFieldIndex(), + instruction->GetDexPc(), + calling_convention); +} + +void LocationsBuilderARMVIXL::VisitNullCheck(HNullCheck* instruction) { + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); + locations->SetInAt(0, Location::RequiresRegister()); +} + +void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) { + if (CanMoveNullCheckToUser(instruction)) { + return; + } + + UseScratchRegisterScope temps(GetVIXLAssembler()); + // Ensure the pc position is recorded immediately after the `ldr` instruction. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ ldr(temps.Acquire(), MemOperand(InputRegisterAt(instruction, 0))); + RecordPcInfo(instruction, instruction->GetDexPc()); +} + +void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) { + NullCheckSlowPathARMVIXL* slow_path = + new (GetScopedAllocator()) NullCheckSlowPathARMVIXL(instruction); + AddSlowPath(slow_path); + __ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel()); +} + +void InstructionCodeGeneratorARMVIXL::VisitNullCheck(HNullCheck* instruction) { + codegen_->GenerateNullCheck(instruction); +} + +void CodeGeneratorARMVIXL::LoadFromShiftedRegOffset(DataType::Type type, + Location out_loc, + vixl32::Register base, + vixl32::Register reg_index, + vixl32::Condition cond) { + uint32_t shift_count = DataType::SizeShift(type); + MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count); + + switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + __ Ldrb(cond, RegisterFrom(out_loc), mem_address); + 
break; + case DataType::Type::kInt8: + __ Ldrsb(cond, RegisterFrom(out_loc), mem_address); + break; + case DataType::Type::kUint16: + __ Ldrh(cond, RegisterFrom(out_loc), mem_address); + break; + case DataType::Type::kInt16: + __ Ldrsh(cond, RegisterFrom(out_loc), mem_address); + break; + case DataType::Type::kReference: + case DataType::Type::kInt32: + __ Ldr(cond, RegisterFrom(out_loc), mem_address); + break; + // T32 doesn't support LoadFromShiftedRegOffset mem address mode for these types. + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + default: + LOG(FATAL) << "Unreachable type " << type; + UNREACHABLE(); + } +} + +void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(DataType::Type type, + Location loc, + vixl32::Register base, + vixl32::Register reg_index, + vixl32::Condition cond) { + uint32_t shift_count = DataType::SizeShift(type); + MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count); + + switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + __ Strb(cond, RegisterFrom(loc), mem_address); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + __ Strh(cond, RegisterFrom(loc), mem_address); + break; + case DataType::Type::kReference: + case DataType::Type::kInt32: + __ Str(cond, RegisterFrom(loc), mem_address); + break; + // T32 doesn't support StoreToShiftedRegOffset mem address mode for these types. + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + default: + LOG(FATAL) << "Unreachable type " << type; + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) { + bool object_array_get_with_read_barrier = + kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, + object_array_get_with_read_barrier + ? 
LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall); + if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. + } + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (DataType::IsFloatingPointType(instruction->GetType())) { + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + } else { + // The output overlaps in the case of an object array get with + // read barriers enabled: we do not want the move to overwrite the + // array's location, as we need it to emit the read barrier. + locations->SetOut( + Location::RequiresRegister(), + object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap); + } + if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { + if (instruction->GetIndex()->IsConstant()) { + // Array loads with constant index are treated as field loads. + // We need a temporary register for the read barrier load in + // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier() + // only if the offset is too big. + uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction); + uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue(); + offset += index << DataType::SizeShift(DataType::Type::kReference); + if (offset >= kReferenceLoadMinFarOffset) { + locations->AddTemp(Location::RequiresRegister()); + } + } else { + // We need a non-scratch temporary for the array data pointer in + // CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(). + locations->AddTemp(Location::RequiresRegister()); + } + } else if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { + // Also need a temporary for String compression feature. 
+ locations->AddTemp(Location::RequiresRegister()); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Location obj_loc = locations->InAt(0); + vixl32::Register obj = InputRegisterAt(instruction, 0); + Location index = locations->InAt(1); + Location out_loc = locations->Out(); + uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction); + DataType::Type type = instruction->GetType(); + const bool maybe_compressed_char_at = mirror::kUseStringCompression && + instruction->IsStringCharAt(); + HInstruction* array_instr = instruction->GetArray(); + bool has_intermediate_address = array_instr->IsIntermediateAddress(); + + switch (type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { + vixl32::Register length; + if (maybe_compressed_char_at) { + length = RegisterFrom(locations->GetTemp(0)); + uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->LoadFromOffset(kLoadWord, length, obj, count_offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + if (index.IsConstant()) { + int32_t const_index = Int32ConstantFrom(index); + if (maybe_compressed_char_at) { + vixl32::Label uncompressed_load, done; + vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done); + __ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not. 
+ static_assert(static_cast(mirror::StringCompressionFlag::kCompressed) == 0u, + "Expecting 0=compressed, 1=uncompressed"); + __ B(cs, &uncompressed_load, /* is_far_target= */ false); + GetAssembler()->LoadFromOffset(kLoadUnsignedByte, + RegisterFrom(out_loc), + obj, + data_offset + const_index); + __ B(final_label); + __ Bind(&uncompressed_load); + GetAssembler()->LoadFromOffset(GetLoadOperandType(DataType::Type::kUint16), + RegisterFrom(out_loc), + obj, + data_offset + (const_index << 1)); + if (done.IsReferenced()) { + __ Bind(&done); + } + } else { + uint32_t full_offset = data_offset + (const_index << DataType::SizeShift(type)); + + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + LoadOperandType load_type = GetLoadOperandType(type); + GetAssembler()->LoadFromOffset(load_type, RegisterFrom(out_loc), obj, full_offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + + if (has_intermediate_address) { + // We do not need to compute the intermediate address from the array: the + // input instruction has done it already. See the comment in + // `TryExtractArrayAccessAddress()`. + if (kIsDebugBuild) { + HIntermediateAddress* tmp = array_instr->AsIntermediateAddress(); + DCHECK_EQ(Uint64ConstantFrom(tmp->GetOffset()), data_offset); + } + temp = obj; + } else { + __ Add(temp, obj, data_offset); + } + if (maybe_compressed_char_at) { + vixl32::Label uncompressed_load, done; + vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done); + __ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not. 
+ static_assert(static_cast(mirror::StringCompressionFlag::kCompressed) == 0u, + "Expecting 0=compressed, 1=uncompressed"); + __ B(cs, &uncompressed_load, /* is_far_target= */ false); + __ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0)); + __ B(final_label); + __ Bind(&uncompressed_load); + __ Ldrh(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 1)); + if (done.IsReferenced()) { + __ Bind(&done); + } + } else { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + } + break; + } + + case DataType::Type::kReference: { + // The read barrier instrumentation of object ArrayGet + // instructions does not support the HIntermediateAddress + // instruction. + DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier)); + + static_assert( + sizeof(mirror::HeapReference) == sizeof(int32_t), + "art::mirror::HeapReference and int32_t have different sizes."); + // /* HeapReference */ out = + // *(obj + data_offset + index * sizeof(HeapReference)) + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // Note that a potential implicit null check is handled in this + // CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier call. + DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0))); + if (index.IsConstant()) { + // Array load with a constant index can be treated as a field load. + Location maybe_temp = + (locations->GetTempCount() != 0) ? 
locations->GetTemp(0) : Location(); + data_offset += Int32ConstantFrom(index) << DataType::SizeShift(type); + codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction, + out_loc, + obj, + data_offset, + maybe_temp, + /* needs_null_check= */ false); + } else { + Location temp = locations->GetTemp(0); + codegen_->GenerateArrayLoadWithBakerReadBarrier( + out_loc, obj, data_offset, index, temp, /* needs_null_check= */ false); + } + } else { + vixl32::Register out = OutputRegister(instruction); + if (index.IsConstant()) { + size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset; + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). + codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + + if (has_intermediate_address) { + // We do not need to compute the intermediate address from the array: the + // input instruction has done it already. See the comment in + // `TryExtractArrayAccessAddress()`. + if (kIsDebugBuild) { + HIntermediateAddress* tmp = array_instr->AsIntermediateAddress(); + DCHECK_EQ(Uint64ConstantFrom(tmp->GetOffset()), data_offset); + } + temp = obj; + } else { + __ Add(temp, obj, data_offset); + } + { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index)); + temps.Close(); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). + codegen_->MaybeGenerateReadBarrierSlow( + instruction, out_loc, out_loc, obj_loc, data_offset, index); + } + } + break; + } + + case DataType::Type::kInt64: { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. + EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + if (index.IsConstant()) { + size_t offset = + (Int32ConstantFrom(index) << TIMES_8) + data_offset; + GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), obj, offset); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8)); + GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), temp, data_offset); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kFloat32: { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. 
+ EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + vixl32::SRegister out = SRegisterFrom(out_loc); + if (index.IsConstant()) { + size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset; + GetAssembler()->LoadSFromOffset(out, obj, offset); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4)); + GetAssembler()->LoadSFromOffset(out, temp, data_offset); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kFloat64: { + // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. + EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + if (index.IsConstant()) { + size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset; + GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), obj, offset); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8)); + GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), temp, data_offset); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << type; + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) { + DataType::Type value_type = instruction->GetComponentType(); + + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); + bool needs_type_check = instruction->NeedsTypeCheck(); + + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, + needs_type_check ? 
LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); + + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (DataType::IsFloatingPointType(value_type)) { + locations->SetInAt(2, Location::RequiresFpuRegister()); + } else { + locations->SetInAt(2, Location::RequiresRegister()); + } + if (needs_write_barrier) { + // Temporary registers for the write barrier. + locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too. + locations->AddTemp(Location::RequiresRegister()); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::Register array = InputRegisterAt(instruction, 0); + Location index = locations->InAt(1); + DataType::Type value_type = instruction->GetComponentType(); + bool needs_type_check = instruction->NeedsTypeCheck(); + bool needs_write_barrier = + CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); + uint32_t data_offset = + mirror::Array::DataOffset(DataType::Size(value_type)).Uint32Value(); + Location value_loc = locations->InAt(2); + HInstruction* array_instr = instruction->GetArray(); + bool has_intermediate_address = array_instr->IsIntermediateAddress(); + + switch (value_type) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { + if (index.IsConstant()) { + int32_t const_index = Int32ConstantFrom(index); + uint32_t full_offset = + data_offset + (const_index << DataType::SizeShift(value_type)); + StoreOperandType store_type = GetStoreOperandType(value_type); + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. 
+ EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + GetAssembler()->StoreToOffset(store_type, RegisterFrom(value_loc), array, full_offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + + if (has_intermediate_address) { + // We do not need to compute the intermediate address from the array: the + // input instruction has done it already. See the comment in + // `TryExtractArrayAccessAddress()`. + if (kIsDebugBuild) { + HIntermediateAddress* tmp = array_instr->AsIntermediateAddress(); + DCHECK_EQ(Uint64ConstantFrom(tmp->GetOffset()), data_offset); + } + temp = array; + } else { + __ Add(temp, array, data_offset); + } + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); + codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + break; + } + + case DataType::Type::kReference: { + vixl32::Register value = RegisterFrom(value_loc); + // TryExtractArrayAccessAddress optimization is never applied for non-primitive ArraySet. + // See the comment in instruction_simplifier_shared.cc. + DCHECK(!has_intermediate_address); + + if (instruction->InputAt(2)->IsNullConstant()) { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. + EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + // Just setting null. 
+ if (index.IsConstant()) { + size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset; + GetAssembler()->StoreToOffset(kStoreWord, value, array, offset); + } else { + DCHECK(index.IsRegister()) << index; + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, array, data_offset); + codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index)); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + DCHECK(!needs_write_barrier); + DCHECK(!needs_type_check); + break; + } + + DCHECK(needs_write_barrier); + Location temp1_loc = locations->GetTemp(0); + vixl32::Register temp1 = RegisterFrom(temp1_loc); + Location temp2_loc = locations->GetTemp(1); + vixl32::Register temp2 = RegisterFrom(temp2_loc); + + bool can_value_be_null = instruction->GetValueCanBeNull(); + vixl32::Label do_store; + if (can_value_be_null) { + __ CompareAndBranchIfZero(value, &do_store, /* is_far_target= */ false); + } + + SlowPathCodeARMVIXL* slow_path = nullptr; + if (needs_type_check) { + slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARMVIXL(instruction); + codegen_->AddSlowPath(slow_path); + + const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); + const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); + + // Note that when read barriers are enabled, the type checks + // are performed without read barriers. This is fine, even in + // the case where a class object is in the from-space after + // the flip, as a comparison involving such a type would not + // produce a false positive; it may of course produce a false + // negative, in which case we would take the ArraySet slow + // path. + + { + // Ensure we record the pc position immediately after the `ldr` instruction. 
+ ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + // /* HeapReference */ temp1 = array->klass_ + __ ldr(temp1, MemOperand(array, class_offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + GetAssembler()->MaybeUnpoisonHeapReference(temp1); + + // /* HeapReference */ temp1 = temp1->component_type_ + GetAssembler()->LoadFromOffset(kLoadWord, temp1, temp1, component_offset); + // /* HeapReference */ temp2 = value->klass_ + GetAssembler()->LoadFromOffset(kLoadWord, temp2, value, class_offset); + // If heap poisoning is enabled, no need to unpoison `temp1` + // nor `temp2`, as we are comparing two poisoned references. + __ Cmp(temp1, temp2); + + if (instruction->StaticTypeOfArrayIsObjectArray()) { + vixl32::Label do_put; + __ B(eq, &do_put, /* is_far_target= */ false); + // If heap poisoning is enabled, the `temp1` reference has + // not been unpoisoned yet; unpoison it now. + GetAssembler()->MaybeUnpoisonHeapReference(temp1); + + // /* HeapReference */ temp1 = temp1->super_class_ + GetAssembler()->LoadFromOffset(kLoadWord, temp1, temp1, super_offset); + // If heap poisoning is enabled, no need to unpoison + // `temp1`, as we are comparing against null below. + __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel()); + __ Bind(&do_put); + } else { + __ B(ne, slow_path->GetEntryLabel()); + } + } + + codegen_->MarkGCCard(temp1, temp2, array, value, /* can_be_null= */ false); + + if (can_value_be_null) { + DCHECK(do_store.IsReferenced()); + __ Bind(&do_store); + } + + vixl32::Register source = value; + if (kPoisonHeapReferences) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. 
+ DCHECK_EQ(value_type, DataType::Type::kReference); + __ Mov(temp1, value); + GetAssembler()->PoisonHeapReference(temp1); + source = temp1; + } + + { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. + EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + if (index.IsConstant()) { + size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset; + GetAssembler()->StoreToOffset(kStoreWord, source, array, offset); + } else { + DCHECK(index.IsRegister()) << index; + + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, array, data_offset); + codegen_->StoreToShiftedRegOffset(value_type, + LocationFrom(source), + temp, + RegisterFrom(index)); + } + + if (can_value_be_null || !needs_type_check) { + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + } + + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } + + break; + } + + case DataType::Type::kInt64: { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. 
+ EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + Location value = locations->InAt(2); + if (index.IsConstant()) { + size_t offset = + (Int32ConstantFrom(index) << TIMES_8) + data_offset; + GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), array, offset); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8)); + GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), temp, data_offset); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kFloat32: { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. + EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + Location value = locations->InAt(2); + DCHECK(value.IsFpuRegister()); + if (index.IsConstant()) { + size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset; + GetAssembler()->StoreSToOffset(SRegisterFrom(value), array, offset); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4)); + GetAssembler()->StoreSToOffset(SRegisterFrom(value), temp, data_offset); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kFloat64: { + // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. + // As two macro instructions can be emitted the max size is doubled. 
+ EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes); + Location value = locations->InAt(2); + DCHECK(value.IsFpuRegisterPair()); + if (index.IsConstant()) { + size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset; + GetAssembler()->StoreDToOffset(DRegisterFrom(value), array, offset); + } else { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8)); + GetAssembler()->StoreDToOffset(DRegisterFrom(value), temp, data_offset); + } + codegen_->MaybeRecordImplicitNullCheck(instruction); + break; + } + + case DataType::Type::kUint32: + case DataType::Type::kUint64: + case DataType::Type::kVoid: + LOG(FATAL) << "Unreachable type " << value_type; + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitArrayLength(HArrayLength* instruction) { + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); + vixl32::Register obj = InputRegisterAt(instruction, 0); + vixl32::Register out = OutputRegister(instruction); + { + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ ldr(out, MemOperand(obj, offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + } + // Mask out compression flag from String's array length. 
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) { + __ Lsr(out, out, 1u); + } +} + +void LocationsBuilderARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->GetOffset())); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) { + vixl32::Register out = OutputRegister(instruction); + vixl32::Register first = InputRegisterAt(instruction, 0); + Location second = instruction->GetLocations()->InAt(1); + + if (second.IsRegister()) { + __ Add(out, first, RegisterFrom(second)); + } else { + __ Add(out, first, Int32ConstantFrom(second)); + } +} + +void LocationsBuilderARMVIXL::VisitIntermediateAddressIndex( + HIntermediateAddressIndex* instruction) { + LOG(FATAL) << "Unreachable " << instruction->GetId(); +} + +void InstructionCodeGeneratorARMVIXL::VisitIntermediateAddressIndex( + HIntermediateAddressIndex* instruction) { + LOG(FATAL) << "Unreachable " << instruction->GetId(); +} + +void LocationsBuilderARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) { + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0))); + caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(1))); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); + + HInstruction* index = instruction->InputAt(0); + HInstruction* length = instruction->InputAt(1); + // If both index and length are constants we can statically check the bounds. 
But if at least one + // of them is not encodable ArmEncodableConstantOrRegister will create + // Location::RequiresRegister() which is not desired to happen. Instead we create constant + // locations. + bool both_const = index->IsConstant() && length->IsConstant(); + locations->SetInAt(0, both_const + ? Location::ConstantLocation(index->AsConstant()) + : ArmEncodableConstantOrRegister(index, CMP)); + locations->SetInAt(1, both_const + ? Location::ConstantLocation(length->AsConstant()) + : ArmEncodableConstantOrRegister(length, CMP)); +} + +void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Location index_loc = locations->InAt(0); + Location length_loc = locations->InAt(1); + + if (length_loc.IsConstant()) { + int32_t length = Int32ConstantFrom(length_loc); + if (index_loc.IsConstant()) { + // BCE will remove the bounds check if we are guaranteed to pass. + int32_t index = Int32ConstantFrom(index_loc); + if (index < 0 || index >= length) { + SlowPathCodeARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction); + codegen_->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel()); + } else { + // Some optimization after BCE may have generated this, and we should not + // generate a bounds check if it is a valid range. 
+ } + return; + } + + SlowPathCodeARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction); + __ Cmp(RegisterFrom(index_loc), length); + codegen_->AddSlowPath(slow_path); + __ B(hs, slow_path->GetEntryLabel()); + } else { + SlowPathCodeARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARMVIXL(instruction); + __ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0)); + codegen_->AddSlowPath(slow_path); + __ B(ls, slow_path->GetEntryLabel()); + } +} + +void CodeGeneratorARMVIXL::MarkGCCard(vixl32::Register temp, + vixl32::Register card, + vixl32::Register object, + vixl32::Register value, + bool can_be_null) { + vixl32::Label is_null; + if (can_be_null) { + __ CompareAndBranchIfZero(value, &is_null); + } + // Load the address of the card table into `card`. + GetAssembler()->LoadFromOffset( + kLoadWord, card, tr, Thread::CardTableOffset().Int32Value()); + // Calculate the offset (in the card table) of the card corresponding to + // `object`. + __ Lsr(temp, object, Operand::From(gc::accounting::CardTable::kCardShift)); + // Write the `art::gc::accounting::CardTable::kCardDirty` value into the + // `object`'s card. + // + // Register `card` contains the address of the card table. Note that the card + // table's base is biased during its creation so that it always starts at an + // address whose least-significant byte is equal to `kCardDirty` (see + // art::gc::accounting::CardTable::Create). Therefore the STRB instruction + // below writes the `kCardDirty` (byte) value into the `object`'s card + // (located at `card + object >> kCardShift`). + // + // This dual use of the value in register `card` (1. to calculate the location + // of the card to mark; and 2. to load the `kCardDirty` value) saves a load + // (no need to explicitly load `kCardDirty` as an immediate value). 
+ __ Strb(card, MemOperand(card, temp)); + if (can_be_null) { + __ Bind(&is_null); + } +} + +void LocationsBuilderARMVIXL::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) { + LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instruction) { + if (instruction->GetNext()->IsSuspendCheck() && + instruction->GetBlock()->GetLoopInformation() != nullptr) { + HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck(); + // The back edge will generate the suspend check. + codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction); + } + + codegen_->GetMoveResolver()->EmitNativeCode(instruction); +} + +void LocationsBuilderARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnSlowPath); + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. +} + +void InstructionCodeGeneratorARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) { + HBasicBlock* block = instruction->GetBlock(); + if (block->GetLoopInformation() != nullptr) { + DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction); + // The back edge will generate the suspend check. + return; + } + if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) { + // The goto will generate the suspend check. 
+ return; + } + GenerateSuspendCheck(instruction, nullptr); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 13); +} + +void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction, + HBasicBlock* successor) { + SuspendCheckSlowPathARMVIXL* slow_path = + down_cast(instruction->GetSlowPath()); + if (slow_path == nullptr) { + slow_path = + new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor); + instruction->SetSlowPath(slow_path); + codegen_->AddSlowPath(slow_path); + if (successor != nullptr) { + DCHECK(successor->IsLoopHeader()); + } + } else { + DCHECK_EQ(slow_path->GetSuccessor(), successor); + } + + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + GetAssembler()->LoadFromOffset( + kLoadUnsignedHalfword, temp, tr, Thread::ThreadFlagsOffset().Int32Value()); + if (successor == nullptr) { + __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetReturnLabel()); + } else { + __ CompareAndBranchIfZero(temp, codegen_->GetLabelOf(successor)); + __ B(slow_path->GetEntryLabel()); + } +} + +ArmVIXLAssembler* ParallelMoveResolverARMVIXL::GetAssembler() const { + return codegen_->GetAssembler(); +} + +void ParallelMoveResolverARMVIXL::EmitMove(size_t index) { + UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler()); + MoveOperands* move = moves_[index]; + Location source = move->GetSource(); + Location destination = move->GetDestination(); + + if (source.IsRegister()) { + if (destination.IsRegister()) { + __ Mov(RegisterFrom(destination), RegisterFrom(source)); + } else if (destination.IsFpuRegister()) { + __ Vmov(SRegisterFrom(destination), RegisterFrom(source)); + } else { + DCHECK(destination.IsStackSlot()); + GetAssembler()->StoreToOffset(kStoreWord, + RegisterFrom(source), + sp, + destination.GetStackIndex()); + } + } else if (source.IsStackSlot()) { + if (destination.IsRegister()) { + 
GetAssembler()->LoadFromOffset(kLoadWord, + RegisterFrom(destination), + sp, + source.GetStackIndex()); + } else if (destination.IsFpuRegister()) { + GetAssembler()->LoadSFromOffset(SRegisterFrom(destination), sp, source.GetStackIndex()); + } else { + DCHECK(destination.IsStackSlot()); + vixl32::Register temp = temps.Acquire(); + GetAssembler()->LoadFromOffset(kLoadWord, temp, sp, source.GetStackIndex()); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex()); + } + } else if (source.IsFpuRegister()) { + if (destination.IsRegister()) { + __ Vmov(RegisterFrom(destination), SRegisterFrom(source)); + } else if (destination.IsFpuRegister()) { + __ Vmov(SRegisterFrom(destination), SRegisterFrom(source)); + } else { + DCHECK(destination.IsStackSlot()); + GetAssembler()->StoreSToOffset(SRegisterFrom(source), sp, destination.GetStackIndex()); + } + } else if (source.IsDoubleStackSlot()) { + if (destination.IsDoubleStackSlot()) { + vixl32::DRegister temp = temps.AcquireD(); + GetAssembler()->LoadDFromOffset(temp, sp, source.GetStackIndex()); + GetAssembler()->StoreDToOffset(temp, sp, destination.GetStackIndex()); + } else if (destination.IsRegisterPair()) { + DCHECK(ExpectedPairLayout(destination)); + GetAssembler()->LoadFromOffset( + kLoadWordPair, LowRegisterFrom(destination), sp, source.GetStackIndex()); + } else { + DCHECK(destination.IsFpuRegisterPair()) << destination; + GetAssembler()->LoadDFromOffset(DRegisterFrom(destination), sp, source.GetStackIndex()); + } + } else if (source.IsRegisterPair()) { + if (destination.IsRegisterPair()) { + __ Mov(LowRegisterFrom(destination), LowRegisterFrom(source)); + __ Mov(HighRegisterFrom(destination), HighRegisterFrom(source)); + } else if (destination.IsFpuRegisterPair()) { + __ Vmov(DRegisterFrom(destination), LowRegisterFrom(source), HighRegisterFrom(source)); + } else { + DCHECK(destination.IsDoubleStackSlot()) << destination; + DCHECK(ExpectedPairLayout(source)); + 
GetAssembler()->StoreToOffset(kStoreWordPair, + LowRegisterFrom(source), + sp, + destination.GetStackIndex()); + } + } else if (source.IsFpuRegisterPair()) { + if (destination.IsRegisterPair()) { + __ Vmov(LowRegisterFrom(destination), HighRegisterFrom(destination), DRegisterFrom(source)); + } else if (destination.IsFpuRegisterPair()) { + __ Vmov(DRegisterFrom(destination), DRegisterFrom(source)); + } else { + DCHECK(destination.IsDoubleStackSlot()) << destination; + GetAssembler()->StoreDToOffset(DRegisterFrom(source), sp, destination.GetStackIndex()); + } + } else { + DCHECK(source.IsConstant()) << source; + HConstant* constant = source.GetConstant(); + if (constant->IsIntConstant() || constant->IsNullConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(constant); + if (destination.IsRegister()) { + __ Mov(RegisterFrom(destination), value); + } else { + DCHECK(destination.IsStackSlot()); + vixl32::Register temp = temps.Acquire(); + __ Mov(temp, value); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex()); + } + } else if (constant->IsLongConstant()) { + int64_t value = Int64ConstantFrom(source); + if (destination.IsRegisterPair()) { + __ Mov(LowRegisterFrom(destination), Low32Bits(value)); + __ Mov(HighRegisterFrom(destination), High32Bits(value)); + } else { + DCHECK(destination.IsDoubleStackSlot()) << destination; + vixl32::Register temp = temps.Acquire(); + __ Mov(temp, Low32Bits(value)); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex()); + __ Mov(temp, High32Bits(value)); + GetAssembler()->StoreToOffset(kStoreWord, + temp, + sp, + destination.GetHighStackIndex(kArmWordSize)); + } + } else if (constant->IsDoubleConstant()) { + double value = constant->AsDoubleConstant()->GetValue(); + if (destination.IsFpuRegisterPair()) { + __ Vmov(DRegisterFrom(destination), value); + } else { + DCHECK(destination.IsDoubleStackSlot()) << destination; + uint64_t int_value = bit_cast(value); + 
vixl32::Register temp = temps.Acquire(); + __ Mov(temp, Low32Bits(int_value)); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex()); + __ Mov(temp, High32Bits(int_value)); + GetAssembler()->StoreToOffset(kStoreWord, + temp, + sp, + destination.GetHighStackIndex(kArmWordSize)); + } + } else { + DCHECK(constant->IsFloatConstant()) << constant->DebugName(); + float value = constant->AsFloatConstant()->GetValue(); + if (destination.IsFpuRegister()) { + __ Vmov(SRegisterFrom(destination), value); + } else { + DCHECK(destination.IsStackSlot()); + vixl32::Register temp = temps.Acquire(); + __ Mov(temp, bit_cast(value)); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex()); + } + } + } +} + +void ParallelMoveResolverARMVIXL::Exchange(vixl32::Register reg, int mem) { + UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Mov(temp, reg); + GetAssembler()->LoadFromOffset(kLoadWord, reg, sp, mem); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, mem); +} + +void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) { + // TODO(VIXL32): Double check the performance of this implementation. + UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler()); + vixl32::Register temp1 = temps.Acquire(); + ScratchRegisterScope ensure_scratch( + this, temp1.GetCode(), r0.GetCode(), codegen_->GetNumberOfCoreRegisters()); + vixl32::Register temp2(ensure_scratch.GetRegister()); + + int stack_offset = ensure_scratch.IsSpilled() ? 
kArmWordSize : 0; + GetAssembler()->LoadFromOffset(kLoadWord, temp1, sp, mem1 + stack_offset); + GetAssembler()->LoadFromOffset(kLoadWord, temp2, sp, mem2 + stack_offset); + GetAssembler()->StoreToOffset(kStoreWord, temp1, sp, mem2 + stack_offset); + GetAssembler()->StoreToOffset(kStoreWord, temp2, sp, mem1 + stack_offset); +} + +void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) { + MoveOperands* move = moves_[index]; + Location source = move->GetSource(); + Location destination = move->GetDestination(); + UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler()); + + if (source.IsRegister() && destination.IsRegister()) { + vixl32::Register temp = temps.Acquire(); + DCHECK(!RegisterFrom(source).Is(temp)); + DCHECK(!RegisterFrom(destination).Is(temp)); + __ Mov(temp, RegisterFrom(destination)); + __ Mov(RegisterFrom(destination), RegisterFrom(source)); + __ Mov(RegisterFrom(source), temp); + } else if (source.IsRegister() && destination.IsStackSlot()) { + Exchange(RegisterFrom(source), destination.GetStackIndex()); + } else if (source.IsStackSlot() && destination.IsRegister()) { + Exchange(RegisterFrom(destination), source.GetStackIndex()); + } else if (source.IsStackSlot() && destination.IsStackSlot()) { + Exchange(source.GetStackIndex(), destination.GetStackIndex()); + } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { + vixl32::Register temp = temps.Acquire(); + __ Vmov(temp, SRegisterFrom(source)); + __ Vmov(SRegisterFrom(source), SRegisterFrom(destination)); + __ Vmov(SRegisterFrom(destination), temp); + } else if (source.IsRegisterPair() && destination.IsRegisterPair()) { + vixl32::DRegister temp = temps.AcquireD(); + __ Vmov(temp, LowRegisterFrom(source), HighRegisterFrom(source)); + __ Mov(LowRegisterFrom(source), LowRegisterFrom(destination)); + __ Mov(HighRegisterFrom(source), HighRegisterFrom(destination)); + __ Vmov(LowRegisterFrom(destination), HighRegisterFrom(destination), temp); + } else if (source.IsRegisterPair() || 
destination.IsRegisterPair()) { + vixl32::Register low_reg = LowRegisterFrom(source.IsRegisterPair() ? source : destination); + int mem = source.IsRegisterPair() ? destination.GetStackIndex() : source.GetStackIndex(); + DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination)); + vixl32::DRegister temp = temps.AcquireD(); + __ Vmov(temp, low_reg, vixl32::Register(low_reg.GetCode() + 1)); + GetAssembler()->LoadFromOffset(kLoadWordPair, low_reg, sp, mem); + GetAssembler()->StoreDToOffset(temp, sp, mem); + } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) { + vixl32::DRegister first = DRegisterFrom(source); + vixl32::DRegister second = DRegisterFrom(destination); + vixl32::DRegister temp = temps.AcquireD(); + __ Vmov(temp, first); + __ Vmov(first, second); + __ Vmov(second, temp); + } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) { + vixl32::DRegister reg = source.IsFpuRegisterPair() + ? DRegisterFrom(source) + : DRegisterFrom(destination); + int mem = source.IsFpuRegisterPair() + ? destination.GetStackIndex() + : source.GetStackIndex(); + vixl32::DRegister temp = temps.AcquireD(); + __ Vmov(temp, reg); + GetAssembler()->LoadDFromOffset(reg, sp, mem); + GetAssembler()->StoreDToOffset(temp, sp, mem); + } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { + vixl32::SRegister reg = source.IsFpuRegister() + ? SRegisterFrom(source) + : SRegisterFrom(destination); + int mem = source.IsFpuRegister() + ? 
destination.GetStackIndex() + : source.GetStackIndex(); + vixl32::Register temp = temps.Acquire(); + __ Vmov(temp, reg); + GetAssembler()->LoadSFromOffset(reg, sp, mem); + GetAssembler()->StoreToOffset(kStoreWord, temp, sp, mem); + } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { + vixl32::DRegister temp1 = temps.AcquireD(); + vixl32::DRegister temp2 = temps.AcquireD(); + __ Vldr(temp1, MemOperand(sp, source.GetStackIndex())); + __ Vldr(temp2, MemOperand(sp, destination.GetStackIndex())); + __ Vstr(temp1, MemOperand(sp, destination.GetStackIndex())); + __ Vstr(temp2, MemOperand(sp, source.GetStackIndex())); + } else { + LOG(FATAL) << "Unimplemented" << source << " <-> " << destination; + } +} + +void ParallelMoveResolverARMVIXL::SpillScratch(int reg) { + __ Push(vixl32::Register(reg)); +} + +void ParallelMoveResolverARMVIXL::RestoreScratch(int reg) { + __ Pop(vixl32::Register(reg)); +} + +HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind( + HLoadClass::LoadKind desired_class_load_kind) { + switch (desired_class_load_kind) { + case HLoadClass::LoadKind::kInvalid: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + case HLoadClass::LoadKind::kReferrersClass: + break; + case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: + case HLoadClass::LoadKind::kBootImageRelRo: + case HLoadClass::LoadKind::kBssEntry: + DCHECK(!Runtime::Current()->UseJitCompilation()); + break; + case HLoadClass::LoadKind::kJitBootImageAddress: + case HLoadClass::LoadKind::kJitTableAddress: + DCHECK(Runtime::Current()->UseJitCompilation()); + break; + case HLoadClass::LoadKind::kRuntimeCall: + break; + } + return desired_class_load_kind; +} + +void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) { + HLoadClass::LoadKind load_kind = cls->GetLoadKind(); + if (load_kind == HLoadClass::LoadKind::kRuntimeCall) { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + CodeGenerator::CreateLoadClassRuntimeCallLocationSummary( + cls, + 
LocationFrom(calling_convention.GetRegisterAt(0)), + LocationFrom(r0)); + DCHECK(calling_convention.GetRegisterAt(0).Is(r0)); + return; + } + DCHECK(!cls->NeedsAccessCheck()); + + const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) + ? LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall; + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind); + if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. + } + + if (load_kind == HLoadClass::LoadKind::kReferrersClass) { + locations->SetInAt(0, Location::RequiresRegister()); + } + locations->SetOut(Location::RequiresRegister()); + if (load_kind == HLoadClass::LoadKind::kBssEntry) { + if (!kUseReadBarrier || kUseBakerReadBarrier) { + // Rely on the type resolution or initialization and marking to save everything we need. + locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); + } else { + // For non-Baker read barrier we have a temp-clobbering call. + } + } +} + +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS { + HLoadClass::LoadKind load_kind = cls->GetLoadKind(); + if (load_kind == HLoadClass::LoadKind::kRuntimeCall) { + codegen_->GenerateLoadClassRuntimeCall(cls); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 14); + return; + } + DCHECK(!cls->NeedsAccessCheck()); + + LocationSummary* locations = cls->GetLocations(); + Location out_loc = locations->Out(); + vixl32::Register out = OutputRegister(cls); + + const ReadBarrierOption read_barrier_option = cls->IsInBootImage() + ? 
kWithoutReadBarrier + : kCompilerReadBarrierOption; + bool generate_null_check = false; + switch (load_kind) { + case HLoadClass::LoadKind::kReferrersClass: { + DCHECK(!cls->CanCallRuntime()); + DCHECK(!cls->MustGenerateClinitCheck()); + // /* GcRoot */ out = current_method->declaring_class_ + vixl32::Register current_method = InputRegisterAt(cls, 0); + codegen_->GenerateGcRootFieldLoad(cls, + out_loc, + current_method, + ArtMethod::DeclaringClassOffset().Int32Value(), + read_barrier_option); + break; + } + case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: { + DCHECK(codegen_->GetCompilerOptions().IsBootImage() || + codegen_->GetCompilerOptions().IsBootImageExtension()); + DCHECK_EQ(read_barrier_option, kWithoutReadBarrier); + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex()); + codegen_->EmitMovwMovtPlaceholder(labels, out); + break; + } + case HLoadClass::LoadKind::kBootImageRelRo: { + DCHECK(!codegen_->GetCompilerOptions().IsBootImage()); + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls)); + codegen_->EmitMovwMovtPlaceholder(labels, out); + __ Ldr(out, MemOperand(out, /* offset= */ 0)); + break; + } + case HLoadClass::LoadKind::kBssEntry: { + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex()); + codegen_->EmitMovwMovtPlaceholder(labels, out); + // All aligned loads are implicitly atomic consume operations on ARM. 
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option); + generate_null_check = true; + break; + } + case HLoadClass::LoadKind::kJitBootImageAddress: { + DCHECK_EQ(read_barrier_option, kWithoutReadBarrier); + uint32_t address = reinterpret_cast32(cls->GetClass().Get()); + DCHECK_NE(address, 0u); + __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address)); + break; + } + case HLoadClass::LoadKind::kJitTableAddress: { + __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(), + cls->GetTypeIndex(), + cls->GetClass())); + // /* GcRoot */ out = *out + codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option); + break; + } + case HLoadClass::LoadKind::kRuntimeCall: + case HLoadClass::LoadKind::kInvalid: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } + + if (generate_null_check || cls->MustGenerateClinitCheck()) { + DCHECK(cls->CanCallRuntime()); + LoadClassSlowPathARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(cls, cls); + codegen_->AddSlowPath(slow_path); + if (generate_null_check) { + __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); + } + if (cls->MustGenerateClinitCheck()) { + GenerateClassInitializationCheck(slow_path, out); + } else { + __ Bind(slow_path->GetExitLabel()); + } + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 15); + } +} + +void LocationsBuilderARMVIXL::VisitLoadMethodHandle(HLoadMethodHandle* load) { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + Location location = LocationFrom(calling_convention.GetRegisterAt(0)); + CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, location, location); +} + +void InstructionCodeGeneratorARMVIXL::VisitLoadMethodHandle(HLoadMethodHandle* load) { + codegen_->GenerateLoadMethodHandleRuntimeCall(load); +} + +void LocationsBuilderARMVIXL::VisitLoadMethodType(HLoadMethodType* load) { + InvokeRuntimeCallingConventionARMVIXL 
calling_convention; + Location location = LocationFrom(calling_convention.GetRegisterAt(0)); + CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(load, location, location); +} + +void InstructionCodeGeneratorARMVIXL::VisitLoadMethodType(HLoadMethodType* load) { + codegen_->GenerateLoadMethodTypeRuntimeCall(load); +} + +void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath); + locations->SetInAt(0, Location::RequiresRegister()); + if (check->HasUses()) { + locations->SetOut(Location::SameAsFirstInput()); + } + // Rely on the type initialization to save everything we need. + locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); +} + +void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) { + // We assume the class is not null. + LoadClassSlowPathARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(), check); + codegen_->AddSlowPath(slow_path); + GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0)); +} + +void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck( + LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf(); + constexpr uint32_t shifted_visibly_initialized_value = + enum_cast(ClassStatus::kVisiblyInitialized) << status_lsb_position; + + const size_t status_offset = mirror::Class::StatusOffset().SizeValue(); + GetAssembler()->LoadFromOffset(kLoadWord, temp, class_reg, status_offset); + __ Cmp(temp, shifted_visibly_initialized_value); + __ B(lo, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); +} + +void InstructionCodeGeneratorARMVIXL::GenerateBitstringTypeCheckCompare( + 
HTypeCheckInstruction* check, + vixl32::Register temp, + vixl32::FlagsUpdate flags_update) { + uint32_t path_to_root = check->GetBitstringPathToRoot(); + uint32_t mask = check->GetBitstringMask(); + DCHECK(IsPowerOfTwo(mask + 1)); + size_t mask_bits = WhichPowerOf2(mask + 1); + + // Note that HInstanceOf shall check for zero value in `temp` but HCheckCast needs + // the Z flag for BNE. This is indicated by the `flags_update` parameter. + if (mask_bits == 16u) { + // Load only the bitstring part of the status word. + __ Ldrh(temp, MemOperand(temp, mirror::Class::StatusOffset().Int32Value())); + // Check if the bitstring bits are equal to `path_to_root`. + if (flags_update == SetFlags) { + __ Cmp(temp, path_to_root); + } else { + __ Sub(temp, temp, path_to_root); + } + } else { + // /* uint32_t */ temp = temp->status_ + __ Ldr(temp, MemOperand(temp, mirror::Class::StatusOffset().Int32Value())); + if (GetAssembler()->ShifterOperandCanHold(SUB, path_to_root)) { + // Compare the bitstring bits using SUB. + __ Sub(temp, temp, path_to_root); + // Shift out bits that do not contribute to the comparison. + __ Lsl(flags_update, temp, temp, dchecked_integral_cast(32u - mask_bits)); + } else if (IsUint<16>(path_to_root)) { + if (temp.IsLow()) { + // Note: Optimized for size but contains one more dependent instruction than necessary. + // MOVW+SUB(register) would be 8 bytes unless we find a low-reg temporary but the + // macro assembler would use the high reg IP for the constant by default. + // Compare the bitstring bits using SUB. + __ Sub(temp, temp, path_to_root & 0x00ffu); // 16-bit SUB (immediate) T2 + __ Sub(temp, temp, path_to_root & 0xff00u); // 32-bit SUB (immediate) T3 + // Shift out bits that do not contribute to the comparison. + __ Lsl(flags_update, temp, temp, dchecked_integral_cast(32u - mask_bits)); + } else { + // Extract the bitstring bits. + __ Ubfx(temp, temp, 0, mask_bits); + // Check if the bitstring bits are equal to `path_to_root`. 
+ if (flags_update == SetFlags) { + __ Cmp(temp, path_to_root); + } else { + __ Sub(temp, temp, path_to_root); + } + } + } else { + // Shift out bits that do not contribute to the comparison. + __ Lsl(temp, temp, dchecked_integral_cast(32u - mask_bits)); + // Check if the shifted bitstring bits are equal to `path_to_root << (32u - mask_bits)`. + if (flags_update == SetFlags) { + __ Cmp(temp, path_to_root << (32u - mask_bits)); + } else { + __ Sub(temp, temp, path_to_root << (32u - mask_bits)); + } + } + } +} + +HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind( + HLoadString::LoadKind desired_string_load_kind) { + switch (desired_string_load_kind) { + case HLoadString::LoadKind::kBootImageLinkTimePcRelative: + case HLoadString::LoadKind::kBootImageRelRo: + case HLoadString::LoadKind::kBssEntry: + DCHECK(!Runtime::Current()->UseJitCompilation()); + break; + case HLoadString::LoadKind::kJitBootImageAddress: + case HLoadString::LoadKind::kJitTableAddress: + DCHECK(Runtime::Current()->UseJitCompilation()); + break; + case HLoadString::LoadKind::kRuntimeCall: + break; + } + return desired_string_load_kind; +} + +void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) { + LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load); + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind); + HLoadString::LoadKind load_kind = load->GetLoadKind(); + if (load_kind == HLoadString::LoadKind::kRuntimeCall) { + locations->SetOut(LocationFrom(r0)); + } else { + locations->SetOut(Location::RequiresRegister()); + if (load_kind == HLoadString::LoadKind::kBssEntry) { + if (!kUseReadBarrier || kUseBakerReadBarrier) { + // Rely on the pResolveString and marking to save everything we need, including temps. + locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); + } else { + // For non-Baker read barrier we have a temp-clobbering call. 
+ } + } + } +} + +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { + LocationSummary* locations = load->GetLocations(); + Location out_loc = locations->Out(); + vixl32::Register out = OutputRegister(load); + HLoadString::LoadKind load_kind = load->GetLoadKind(); + + switch (load_kind) { + case HLoadString::LoadKind::kBootImageLinkTimePcRelative: { + DCHECK(codegen_->GetCompilerOptions().IsBootImage() || + codegen_->GetCompilerOptions().IsBootImageExtension()); + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex()); + codegen_->EmitMovwMovtPlaceholder(labels, out); + return; + } + case HLoadString::LoadKind::kBootImageRelRo: { + DCHECK(!codegen_->GetCompilerOptions().IsBootImage()); + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load)); + codegen_->EmitMovwMovtPlaceholder(labels, out); + __ Ldr(out, MemOperand(out, /* offset= */ 0)); + return; + } + case HLoadString::LoadKind::kBssEntry: { + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex()); + codegen_->EmitMovwMovtPlaceholder(labels, out); + // All aligned loads are implicitly atomic consume operations on ARM. 
+ codegen_->GenerateGcRootFieldLoad( + load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption); + LoadStringSlowPathARMVIXL* slow_path = + new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load); + codegen_->AddSlowPath(slow_path); + __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 16); + return; + } + case HLoadString::LoadKind::kJitBootImageAddress: { + uint32_t address = reinterpret_cast32(load->GetString().Get()); + DCHECK_NE(address, 0u); + __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address)); + return; + } + case HLoadString::LoadKind::kJitTableAddress: { + __ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(), + load->GetStringIndex(), + load->GetString())); + // /* GcRoot */ out = *out + codegen_->GenerateGcRootFieldLoad( + load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption); + return; + } + default: + break; + } + + // TODO: Re-add the compiler code to do string dex cache lookup again. 
+ DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kRuntimeCall); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_); + codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc()); + CheckEntrypointTypes(); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 17); +} + +static int32_t GetExceptionTlsOffset() { + return Thread::ExceptionOffset().Int32Value(); +} + +void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARMVIXL::VisitLoadException(HLoadException* load) { + vixl32::Register out = OutputRegister(load); + GetAssembler()->LoadFromOffset(kLoadWord, out, tr, GetExceptionTlsOffset()); +} + + +void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) { + new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall); +} + +void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + __ Mov(temp, 0); + GetAssembler()->StoreToOffset(kStoreWord, temp, tr, GetExceptionTlsOffset()); +} + +void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); +} + +void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) { + codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc()); + CheckEntrypointTypes(); +} + +// Temp is used for read barrier. 
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { + if (kEmitCompilerReadBarrier && + (kUseBakerReadBarrier || + type_check_kind == TypeCheckKind::kAbstractClassCheck || + type_check_kind == TypeCheckKind::kClassHierarchyCheck || + type_check_kind == TypeCheckKind::kArrayObjectCheck)) { + return 1; + } + return 0; +} + +// Interface case has 3 temps, one for holding the number of interfaces, one for the current +// interface pointer, one for loading the current interface. +// The other checks have one temp for loading the object's class. +static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) { + if (type_check_kind == TypeCheckKind::kInterfaceCheck) { + return 3; + } + return 1 + NumberOfInstanceOfTemps(type_check_kind); +} + +void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) { + LocationSummary::CallKind call_kind = LocationSummary::kNoCall; + TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); + bool baker_read_barrier_slow_path = false; + switch (type_check_kind) { + case TypeCheckKind::kExactCheck: + case TypeCheckKind::kAbstractClassCheck: + case TypeCheckKind::kClassHierarchyCheck: + case TypeCheckKind::kArrayObjectCheck: { + bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction); + call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; + baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier; + break; + } + case TypeCheckKind::kArrayCheck: + case TypeCheckKind::kUnresolvedCheck: + case TypeCheckKind::kInterfaceCheck: + call_kind = LocationSummary::kCallOnSlowPath; + break; + case TypeCheckKind::kBitstringCheck: + break; + } + + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); + if (baker_read_barrier_slow_path) { + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
+ } + locations->SetInAt(0, Location::RequiresRegister()); + if (type_check_kind == TypeCheckKind::kBitstringCheck) { + locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); + locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant())); + locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant())); + } else { + locations->SetInAt(1, Location::RequiresRegister()); + } + // The "out" register is used as a temporary, so it overlaps with the inputs. + // Note that TypeCheckSlowPathARM uses this register too. + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); + locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind)); +} + +void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) { + TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); + LocationSummary* locations = instruction->GetLocations(); + Location obj_loc = locations->InAt(0); + vixl32::Register obj = InputRegisterAt(instruction, 0); + vixl32::Register cls = (type_check_kind == TypeCheckKind::kBitstringCheck) + ? vixl32::Register() + : InputRegisterAt(instruction, 1); + Location out_loc = locations->Out(); + vixl32::Register out = OutputRegister(instruction); + const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind); + DCHECK_LE(num_temps, 1u); + Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation(); + uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); + uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); + uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + vixl32::Label done; + vixl32::Label* const final_label = codegen_->GetFinalLabel(instruction, &done); + SlowPathCodeARMVIXL* slow_path = nullptr; + + // Return 0 if `obj` is null. 
+ // avoid null check if we know obj is not null. + if (instruction->MustDoNullCheck()) { + DCHECK(!out.Is(obj)); + __ Mov(out, 0); + __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false); + } + + switch (type_check_kind) { + case TypeCheckKind::kExactCheck: { + ReadBarrierOption read_barrier_option = + CodeGenerator::ReadBarrierOptionForInstanceOf(instruction); + // /* HeapReference */ out = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + read_barrier_option); + // Classes must be equal for the instanceof to succeed. + __ Cmp(out, cls); + // We speculatively set the result to false without changing the condition + // flags, which allows us to avoid some branching later. + __ Mov(LeaveFlags, out, 0); + + // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8, + // we check that the output is in a low register, so that a 16-bit MOV + // encoding can be used. + if (out.IsLow()) { + // We use the scope because of the IT block that follows. + ExactAssemblyScope guard(GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(eq); + __ mov(eq, out, 1); + } else { + __ B(ne, final_label, /* is_far_target= */ false); + __ Mov(out, 1); + } + + break; + } + + case TypeCheckKind::kAbstractClassCheck: { + ReadBarrierOption read_barrier_option = + CodeGenerator::ReadBarrierOptionForInstanceOf(instruction); + // /* HeapReference */ out = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + read_barrier_option); + // If the class is abstract, we eagerly fetch the super class of the + // object to avoid doing a comparison we know will fail. 
+ vixl32::Label loop; + __ Bind(&loop); + // /* HeapReference */ out = out->super_class_ + GenerateReferenceLoadOneRegister(instruction, + out_loc, + super_offset, + maybe_temp_loc, + read_barrier_option); + // If `out` is null, we use it for the result, and jump to the final label. + __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false); + __ Cmp(out, cls); + __ B(ne, &loop, /* is_far_target= */ false); + __ Mov(out, 1); + break; + } + + case TypeCheckKind::kClassHierarchyCheck: { + ReadBarrierOption read_barrier_option = + CodeGenerator::ReadBarrierOptionForInstanceOf(instruction); + // /* HeapReference */ out = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + read_barrier_option); + // Walk over the class hierarchy to find a match. + vixl32::Label loop, success; + __ Bind(&loop); + __ Cmp(out, cls); + __ B(eq, &success, /* is_far_target= */ false); + // /* HeapReference */ out = out->super_class_ + GenerateReferenceLoadOneRegister(instruction, + out_loc, + super_offset, + maybe_temp_loc, + read_barrier_option); + // This is essentially a null check, but it sets the condition flags to the + // proper value for the code that follows the loop, i.e. not `eq`. + __ Cmp(out, 1); + __ B(hs, &loop, /* is_far_target= */ false); + + // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8, + // we check that the output is in a low register, so that a 16-bit MOV + // encoding can be used. + if (out.IsLow()) { + // If `out` is null, we use it for the result, and the condition flags + // have already been set to `ne`, so the IT block that comes afterwards + // (and which handles the successful case) turns into a NOP (instead of + // overwriting `out`). + __ Bind(&success); + + // We use the scope because of the IT block that follows. 
+ ExactAssemblyScope guard(GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + // There is only one branch to the `success` label (which is bound to this + // IT block), and it has the same condition, `eq`, so in that case the MOV + // is executed. + __ it(eq); + __ mov(eq, out, 1); + } else { + // If `out` is null, we use it for the result, and jump to the final label. + __ B(final_label); + __ Bind(&success); + __ Mov(out, 1); + } + + break; + } + + case TypeCheckKind::kArrayObjectCheck: { + ReadBarrierOption read_barrier_option = + CodeGenerator::ReadBarrierOptionForInstanceOf(instruction); + // /* HeapReference */ out = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + read_barrier_option); + // Do an exact check. + vixl32::Label exact_check; + __ Cmp(out, cls); + __ B(eq, &exact_check, /* is_far_target= */ false); + // Otherwise, we need to check that the object's class is a non-primitive array. + // /* HeapReference */ out = out->component_type_ + GenerateReferenceLoadOneRegister(instruction, + out_loc, + component_offset, + maybe_temp_loc, + read_barrier_option); + // If `out` is null, we use it for the result, and jump to the final label. + __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false); + GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset); + static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); + __ Cmp(out, 0); + // We speculatively set the result to false without changing the condition + // flags, which allows us to avoid some branching later. + __ Mov(LeaveFlags, out, 0); + + // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8, + // we check that the output is in a low register, so that a 16-bit MOV + // encoding can be used. + if (out.IsLow()) { + __ Bind(&exact_check); + + // We use the scope because of the IT block that follows. 
+ ExactAssemblyScope guard(GetVIXLAssembler(), + 2 * vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + + __ it(eq); + __ mov(eq, out, 1); + } else { + __ B(ne, final_label, /* is_far_target= */ false); + __ Bind(&exact_check); + __ Mov(out, 1); + } + + break; + } + + case TypeCheckKind::kArrayCheck: { + // No read barrier since the slow path will retry upon failure. + // /* HeapReference */ out = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + kWithoutReadBarrier); + __ Cmp(out, cls); + DCHECK(locations->OnlyCallsOnSlowPath()); + slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( + instruction, /* is_fatal= */ false); + codegen_->AddSlowPath(slow_path); + __ B(ne, slow_path->GetEntryLabel()); + __ Mov(out, 1); + break; + } + + case TypeCheckKind::kUnresolvedCheck: + case TypeCheckKind::kInterfaceCheck: { + // Note that we indeed only call on slow path, but we always go + // into the slow path for the unresolved and interface check + // cases. + // + // We cannot directly call the InstanceofNonTrivial runtime + // entry point without resorting to a type checking slow path + // here (i.e. by calling InvokeRuntime directly), as it would + // require to assign fixed registers for the inputs of this + // HInstanceOf instruction (following the runtime calling + // convention), which might be cluttered by the potential first + // read barrier emission at the beginning of this method. + // + // TODO: Introduce a new runtime entry point taking the object + // to test (instead of its class) as argument, and let it deal + // with the read barrier issues. This will let us refactor this + // case of the `switch` code as it was previously (with a direct + // call to the runtime not using a type checking slow path). + // This should also be beneficial for the other cases above. 
+ DCHECK(locations->OnlyCallsOnSlowPath()); + slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( + instruction, /* is_fatal= */ false); + codegen_->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel()); + break; + } + + case TypeCheckKind::kBitstringCheck: { + // /* HeapReference */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + kWithoutReadBarrier); + + GenerateBitstringTypeCheckCompare(instruction, out, DontCare); + // If `out` is a low reg and we would have another low reg temp, we could + // optimize this as RSBS+ADC, see GenerateConditionWithZero(). + // + // Also, in some cases when `out` is a low reg and we're loading a constant to IP + // it would make sense to use CMP+MOV+IT+MOV instead of SUB+CLZ+LSR as the code size + // would be the same and we would have fewer direct data dependencies. + codegen_->GenerateConditionWithZero(kCondEQ, out, out); // CLZ+LSR + break; + } + } + + if (done.IsReferenced()) { + __ Bind(&done); + } + + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } +} + +void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) { + TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); + LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind); + locations->SetInAt(0, Location::RequiresRegister()); + if (type_check_kind == TypeCheckKind::kBitstringCheck) { + locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); + locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant())); + locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant())); + } else { + locations->SetInAt(1, Location::RequiresRegister()); + } + 
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind)); +} + +void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { + TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); + LocationSummary* locations = instruction->GetLocations(); + Location obj_loc = locations->InAt(0); + vixl32::Register obj = InputRegisterAt(instruction, 0); + vixl32::Register cls = (type_check_kind == TypeCheckKind::kBitstringCheck) + ? vixl32::Register() + : InputRegisterAt(instruction, 1); + Location temp_loc = locations->GetTemp(0); + vixl32::Register temp = RegisterFrom(temp_loc); + const size_t num_temps = NumberOfCheckCastTemps(type_check_kind); + DCHECK_LE(num_temps, 3u); + Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation(); + Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation(); + const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); + const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); + const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value(); + const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value(); + const uint32_t object_array_data_offset = + mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + + bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction); + SlowPathCodeARMVIXL* type_check_slow_path = + new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( + instruction, is_type_check_slow_path_fatal); + codegen_->AddSlowPath(type_check_slow_path); + + vixl32::Label done; + vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done); + // Avoid null check if we know obj is not null. 
+ if (instruction->MustDoNullCheck()) { + __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false); + } + + switch (type_check_kind) { + case TypeCheckKind::kExactCheck: + case TypeCheckKind::kArrayCheck: { + // /* HeapReference */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + __ Cmp(temp, cls); + // Jump to slow path for throwing the exception or doing a + // more involved array check. + __ B(ne, type_check_slow_path->GetEntryLabel()); + break; + } + + case TypeCheckKind::kAbstractClassCheck: { + // /* HeapReference */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + // If the class is abstract, we eagerly fetch the super class of the + // object to avoid doing a comparison we know will fail. + vixl32::Label loop; + __ Bind(&loop); + // /* HeapReference */ temp = temp->super_class_ + GenerateReferenceLoadOneRegister(instruction, + temp_loc, + super_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + // If the class reference currently in `temp` is null, jump to the slow path to throw the + // exception. + __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel()); + + // Otherwise, compare the classes. + __ Cmp(temp, cls); + __ B(ne, &loop, /* is_far_target= */ false); + break; + } + + case TypeCheckKind::kClassHierarchyCheck: { + // /* HeapReference */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + // Walk over the class hierarchy to find a match. 
+ vixl32::Label loop; + __ Bind(&loop); + __ Cmp(temp, cls); + __ B(eq, final_label, /* is_far_target= */ false); + + // /* HeapReference */ temp = temp->super_class_ + GenerateReferenceLoadOneRegister(instruction, + temp_loc, + super_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + // If the class reference currently in `temp` is null, jump to the slow path to throw the + // exception. + __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel()); + // Otherwise, jump to the beginning of the loop. + __ B(&loop); + break; + } + + case TypeCheckKind::kArrayObjectCheck: { + // /* HeapReference */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + // Do an exact check. + __ Cmp(temp, cls); + __ B(eq, final_label, /* is_far_target= */ false); + + // Otherwise, we need to check that the object's class is a non-primitive array. + // /* HeapReference */ temp = temp->component_type_ + GenerateReferenceLoadOneRegister(instruction, + temp_loc, + component_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + // If the component type is null, jump to the slow path to throw the exception. + __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel()); + // Otherwise,the object is indeed an array, jump to label `check_non_primitive_component_type` + // to further check that this component type is not a primitive type. + GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset); + static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); + __ CompareAndBranchIfNonZero(temp, type_check_slow_path->GetEntryLabel()); + break; + } + + case TypeCheckKind::kUnresolvedCheck: + // We always go into the type check slow path for the unresolved check case. + // We cannot directly call the CheckCast runtime entry point + // without resorting to a type checking slow path here (i.e. 
by + // calling InvokeRuntime directly), as it would require to + // assign fixed registers for the inputs of this HInstanceOf + // instruction (following the runtime calling convention), which + // might be cluttered by the potential first read barrier + // emission at the beginning of this method. + + __ B(type_check_slow_path->GetEntryLabel()); + break; + + case TypeCheckKind::kInterfaceCheck: { + // Avoid read barriers to improve performance of the fast path. We can not get false + // positives by doing this. + // /* HeapReference */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + // /* HeapReference */ temp = temp->iftable_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + temp_loc, + iftable_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + // Iftable is never null. + __ Ldr(RegisterFrom(maybe_temp2_loc), MemOperand(temp, array_length_offset)); + // Loop through the iftable and check if any class matches. + vixl32::Label start_loop; + __ Bind(&start_loop); + __ CompareAndBranchIfZero(RegisterFrom(maybe_temp2_loc), + type_check_slow_path->GetEntryLabel()); + __ Ldr(RegisterFrom(maybe_temp3_loc), MemOperand(temp, object_array_data_offset)); + GetAssembler()->MaybeUnpoisonHeapReference(RegisterFrom(maybe_temp3_loc)); + // Go to next interface. + __ Add(temp, temp, Operand::From(2 * kHeapReferenceSize)); + __ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2); + // Compare the classes and continue the loop if they do not match. 
+ __ Cmp(cls, RegisterFrom(maybe_temp3_loc)); + __ B(ne, &start_loop, /* is_far_target= */ false); + break; + } + + case TypeCheckKind::kBitstringCheck: { + // /* HeapReference */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kWithoutReadBarrier); + + GenerateBitstringTypeCheckCompare(instruction, temp, SetFlags); + __ B(ne, type_check_slow_path->GetEntryLabel()); + break; + } + } + if (done.IsReferenced()) { + __ Bind(&done); + } + + __ Bind(type_check_slow_path->GetExitLabel()); +} + +void LocationsBuilderARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary( + instruction, LocationSummary::kCallOnMainOnly); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); +} + +void InstructionCodeGeneratorARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) { + codegen_->InvokeRuntime(instruction->IsEnter() ? 
kQuickLockObject : kQuickUnlockObject, + instruction, + instruction->GetDexPc()); + if (instruction->IsEnter()) { + CheckEntrypointTypes(); + } else { + CheckEntrypointTypes(); + } + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 18); +} + +void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) { + HandleBitwiseOperation(instruction, AND); +} + +void LocationsBuilderARMVIXL::VisitOr(HOr* instruction) { + HandleBitwiseOperation(instruction, ORR); +} + +void LocationsBuilderARMVIXL::VisitXor(HXor* instruction) { + HandleBitwiseOperation(instruction, EOR); +} + +void LocationsBuilderARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32 + || instruction->GetResultType() == DataType::Type::kInt64); + // Note: GVN reorders commutative operations to have the constant on the right hand side. 
+ locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ArmEncodableConstantOrRegister(instruction->InputAt(1), opcode)); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitAnd(HAnd* instruction) { + HandleBitwiseOperation(instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitOr(HOr* instruction) { + HandleBitwiseOperation(instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitXor(HXor* instruction) { + HandleBitwiseOperation(instruction); +} + +void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + DCHECK(instruction->GetResultType() == DataType::Type::kInt32 + || instruction->GetResultType() == DataType::Type::kInt64); + + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Location first = locations->InAt(0); + Location second = locations->InAt(1); + Location out = locations->Out(); + + if (instruction->GetResultType() == DataType::Type::kInt32) { + vixl32::Register first_reg = RegisterFrom(first); + vixl32::Register second_reg = RegisterFrom(second); + vixl32::Register out_reg = RegisterFrom(out); + + switch (instruction->GetOpKind()) { + case HInstruction::kAnd: + __ Bic(out_reg, first_reg, second_reg); + break; + case HInstruction::kOr: + __ Orn(out_reg, first_reg, second_reg); + break; + // There is no EON on arm. 
+ case HInstruction::kXor: + default: + LOG(FATAL) << "Unexpected instruction " << instruction->DebugName(); + UNREACHABLE(); + } + return; + + } else { + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); + vixl32::Register first_low = LowRegisterFrom(first); + vixl32::Register first_high = HighRegisterFrom(first); + vixl32::Register second_low = LowRegisterFrom(second); + vixl32::Register second_high = HighRegisterFrom(second); + vixl32::Register out_low = LowRegisterFrom(out); + vixl32::Register out_high = HighRegisterFrom(out); + + switch (instruction->GetOpKind()) { + case HInstruction::kAnd: + __ Bic(out_low, first_low, second_low); + __ Bic(out_high, first_high, second_high); + break; + case HInstruction::kOr: + __ Orn(out_low, first_low, second_low); + __ Orn(out_high, first_high, second_high); + break; + // There is no EON on arm. + case HInstruction::kXor: + default: + LOG(FATAL) << "Unexpected instruction " << instruction->DebugName(); + UNREACHABLE(); + } + } +} + +void LocationsBuilderARMVIXL::VisitDataProcWithShifterOp( + HDataProcWithShifterOp* instruction) { + DCHECK(instruction->GetType() == DataType::Type::kInt32 || + instruction->GetType() == DataType::Type::kInt64); + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + const bool overlap = instruction->GetType() == DataType::Type::kInt64 && + HDataProcWithShifterOp::IsExtensionOp(instruction->GetOpKind()); + + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), + overlap ? 
Location::kOutputOverlap : Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitDataProcWithShifterOp( + HDataProcWithShifterOp* instruction) { + const LocationSummary* const locations = instruction->GetLocations(); + const HInstruction::InstructionKind kind = instruction->GetInstrKind(); + const HDataProcWithShifterOp::OpKind op_kind = instruction->GetOpKind(); + + if (instruction->GetType() == DataType::Type::kInt32) { + const vixl32::Register first = InputRegisterAt(instruction, 0); + const vixl32::Register output = OutputRegister(instruction); + const vixl32::Register second = instruction->InputAt(1)->GetType() == DataType::Type::kInt64 + ? LowRegisterFrom(locations->InAt(1)) + : InputRegisterAt(instruction, 1); + + if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) { + DCHECK_EQ(kind, HInstruction::kAdd); + + switch (op_kind) { + case HDataProcWithShifterOp::kUXTB: + __ Uxtab(output, first, second); + break; + case HDataProcWithShifterOp::kUXTH: + __ Uxtah(output, first, second); + break; + case HDataProcWithShifterOp::kSXTB: + __ Sxtab(output, first, second); + break; + case HDataProcWithShifterOp::kSXTH: + __ Sxtah(output, first, second); + break; + default: + LOG(FATAL) << "Unexpected operation kind: " << op_kind; + UNREACHABLE(); + } + } else { + GenerateDataProcInstruction(kind, + output, + first, + Operand(second, + ShiftFromOpKind(op_kind), + instruction->GetShiftAmount()), + codegen_); + } + } else { + DCHECK_EQ(instruction->GetType(), DataType::Type::kInt64); + + if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) { + const vixl32::Register second = InputRegisterAt(instruction, 1); + + DCHECK(!LowRegisterFrom(locations->Out()).Is(second)); + GenerateDataProc(kind, + locations->Out(), + locations->InAt(0), + second, + Operand(second, ShiftType::ASR, 31), + codegen_); + } else { + GenerateLongDataProc(instruction, codegen_); + } + } +} + +// TODO(VIXL): Remove optimizations in the helper when they are implemented in vixl. 
+void InstructionCodeGeneratorARMVIXL::GenerateAndConst(vixl32::Register out, + vixl32::Register first, + uint32_t value) { + // Optimize special cases for individual halfs of `and-long` (`and` is simplified earlier). + if (value == 0xffffffffu) { + if (!out.Is(first)) { + __ Mov(out, first); + } + return; + } + if (value == 0u) { + __ Mov(out, 0); + return; + } + if (GetAssembler()->ShifterOperandCanHold(AND, value)) { + __ And(out, first, value); + } else if (GetAssembler()->ShifterOperandCanHold(BIC, ~value)) { + __ Bic(out, first, ~value); + } else { + DCHECK(IsPowerOfTwo(value + 1)); + __ Ubfx(out, first, 0, WhichPowerOf2(value + 1)); + } +} + +// TODO(VIXL): Remove optimizations in the helper when they are implemented in vixl. +void InstructionCodeGeneratorARMVIXL::GenerateOrrConst(vixl32::Register out, + vixl32::Register first, + uint32_t value) { + // Optimize special cases for individual halfs of `or-long` (`or` is simplified earlier). + if (value == 0u) { + if (!out.Is(first)) { + __ Mov(out, first); + } + return; + } + if (value == 0xffffffffu) { + __ Mvn(out, 0); + return; + } + if (GetAssembler()->ShifterOperandCanHold(ORR, value)) { + __ Orr(out, first, value); + } else { + DCHECK(GetAssembler()->ShifterOperandCanHold(ORN, ~value)); + __ Orn(out, first, ~value); + } +} + +// TODO(VIXL): Remove optimizations in the helper when they are implemented in vixl. +void InstructionCodeGeneratorARMVIXL::GenerateEorConst(vixl32::Register out, + vixl32::Register first, + uint32_t value) { + // Optimize special case for individual halfs of `xor-long` (`xor` is simplified earlier). 
+ if (value == 0u) { + if (!out.Is(first)) { + __ Mov(out, first); + } + return; + } + __ Eor(out, first, value); +} + +void InstructionCodeGeneratorARMVIXL::GenerateAddLongConst(Location out, + Location first, + uint64_t value) { + vixl32::Register out_low = LowRegisterFrom(out); + vixl32::Register out_high = HighRegisterFrom(out); + vixl32::Register first_low = LowRegisterFrom(first); + vixl32::Register first_high = HighRegisterFrom(first); + uint32_t value_low = Low32Bits(value); + uint32_t value_high = High32Bits(value); + if (value_low == 0u) { + if (!out_low.Is(first_low)) { + __ Mov(out_low, first_low); + } + __ Add(out_high, first_high, value_high); + return; + } + __ Adds(out_low, first_low, value_low); + if (GetAssembler()->ShifterOperandCanHold(ADC, value_high)) { + __ Adc(out_high, first_high, value_high); + } else { + DCHECK(GetAssembler()->ShifterOperandCanHold(SBC, ~value_high)); + __ Sbc(out_high, first_high, ~value_high); + } +} + +void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Location first = locations->InAt(0); + Location second = locations->InAt(1); + Location out = locations->Out(); + + if (second.IsConstant()) { + uint64_t value = static_cast(Int64FromConstant(second.GetConstant())); + uint32_t value_low = Low32Bits(value); + if (instruction->GetResultType() == DataType::Type::kInt32) { + vixl32::Register first_reg = InputRegisterAt(instruction, 0); + vixl32::Register out_reg = OutputRegister(instruction); + if (instruction->IsAnd()) { + GenerateAndConst(out_reg, first_reg, value_low); + } else if (instruction->IsOr()) { + GenerateOrrConst(out_reg, first_reg, value_low); + } else { + DCHECK(instruction->IsXor()); + GenerateEorConst(out_reg, first_reg, value_low); + } + } else { + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); + uint32_t value_high = High32Bits(value); + vixl32::Register first_low = 
LowRegisterFrom(first); + vixl32::Register first_high = HighRegisterFrom(first); + vixl32::Register out_low = LowRegisterFrom(out); + vixl32::Register out_high = HighRegisterFrom(out); + if (instruction->IsAnd()) { + GenerateAndConst(out_low, first_low, value_low); + GenerateAndConst(out_high, first_high, value_high); + } else if (instruction->IsOr()) { + GenerateOrrConst(out_low, first_low, value_low); + GenerateOrrConst(out_high, first_high, value_high); + } else { + DCHECK(instruction->IsXor()); + GenerateEorConst(out_low, first_low, value_low); + GenerateEorConst(out_high, first_high, value_high); + } + } + return; + } + + if (instruction->GetResultType() == DataType::Type::kInt32) { + vixl32::Register first_reg = InputRegisterAt(instruction, 0); + vixl32::Register second_reg = InputRegisterAt(instruction, 1); + vixl32::Register out_reg = OutputRegister(instruction); + if (instruction->IsAnd()) { + __ And(out_reg, first_reg, second_reg); + } else if (instruction->IsOr()) { + __ Orr(out_reg, first_reg, second_reg); + } else { + DCHECK(instruction->IsXor()); + __ Eor(out_reg, first_reg, second_reg); + } + } else { + DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64); + vixl32::Register first_low = LowRegisterFrom(first); + vixl32::Register first_high = HighRegisterFrom(first); + vixl32::Register second_low = LowRegisterFrom(second); + vixl32::Register second_high = HighRegisterFrom(second); + vixl32::Register out_low = LowRegisterFrom(out); + vixl32::Register out_high = HighRegisterFrom(out); + if (instruction->IsAnd()) { + __ And(out_low, first_low, second_low); + __ And(out_high, first_high, second_high); + } else if (instruction->IsOr()) { + __ Orr(out_low, first_low, second_low); + __ Orr(out_high, first_high, second_high); + } else { + DCHECK(instruction->IsXor()); + __ Eor(out_low, first_low, second_low); + __ Eor(out_high, first_high, second_high); + } + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister( + 
HInstruction* instruction, + Location out, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option) { + vixl32::Register out_reg = RegisterFrom(out); + if (read_barrier_option == kWithReadBarrier) { + CHECK(kEmitCompilerReadBarrier); + DCHECK(maybe_temp.IsRegister()) << maybe_temp; + if (kUseBakerReadBarrier) { + // Load with fast path based Baker's read barrier. + // /* HeapReference */ out = *(out + offset) + codegen_->GenerateFieldLoadWithBakerReadBarrier( + instruction, out, out_reg, offset, maybe_temp, /* needs_null_check= */ false); + } else { + // Load with slow path based read barrier. + // Save the value of `out` into `maybe_temp` before overwriting it + // in the following move operation, as we will need it for the + // read barrier below. + __ Mov(RegisterFrom(maybe_temp), out_reg); + // /* HeapReference */ out = *(out + offset) + GetAssembler()->LoadFromOffset(kLoadWord, out_reg, out_reg, offset); + codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset); + } + } else { + // Plain load with no read barrier. + // /* HeapReference */ out = *(out + offset) + GetAssembler()->LoadFromOffset(kLoadWord, out_reg, out_reg, offset); + GetAssembler()->MaybeUnpoisonHeapReference(out_reg); + } +} + +void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters( + HInstruction* instruction, + Location out, + Location obj, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option) { + vixl32::Register out_reg = RegisterFrom(out); + vixl32::Register obj_reg = RegisterFrom(obj); + if (read_barrier_option == kWithReadBarrier) { + CHECK(kEmitCompilerReadBarrier); + if (kUseBakerReadBarrier) { + DCHECK(maybe_temp.IsRegister()) << maybe_temp; + // Load with fast path based Baker's read barrier. 
+ // /* HeapReference */ out = *(obj + offset) + codegen_->GenerateFieldLoadWithBakerReadBarrier( + instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check= */ false); + } else { + // Load with slow path based read barrier. + // /* HeapReference */ out = *(obj + offset) + GetAssembler()->LoadFromOffset(kLoadWord, out_reg, obj_reg, offset); + codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset); + } + } else { + // Plain load with no read barrier. + // /* HeapReference */ out = *(obj + offset) + GetAssembler()->LoadFromOffset(kLoadWord, out_reg, obj_reg, offset); + GetAssembler()->MaybeUnpoisonHeapReference(out_reg); + } +} + +void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( + HInstruction* instruction, + Location root, + vixl32::Register obj, + uint32_t offset, + ReadBarrierOption read_barrier_option) { + vixl32::Register root_reg = RegisterFrom(root); + if (read_barrier_option == kWithReadBarrier) { + DCHECK(kEmitCompilerReadBarrier); + if (kUseBakerReadBarrier) { + // Fast path implementation of art::ReadBarrier::BarrierForRoot when + // Baker's read barrier are used. + + // Query `art::Thread::Current()->GetIsGcMarking()` (stored in + // the Marking Register) to decide whether we need to enter + // the slow path to mark the GC root. + // + // We use shared thunks for the slow path; shared within the method + // for JIT, across methods for AOT. That thunk checks the reference + // and jumps to the entrypoint if needed. + // + // lr = &return_address; + // GcRoot root = *(obj+offset); // Original reference load. + // if (mr) { // Thread::Current()->GetIsGcMarking() + // goto gc_root_thunk(lr) + // } + // return_address: + + UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(ip); + bool narrow = CanEmitNarrowLdr(root_reg, obj, offset); + uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow); + + size_t narrow_instructions = /* CMP */ (mr.IsLow() ? 1u : 0u) + /* LDR */ (narrow ? 
1u : 0u); + size_t wide_instructions = /* ADR+CMP+LDR+BNE */ 4u - narrow_instructions; + size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes + + narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes; + ExactAssemblyScope guard(GetVIXLAssembler(), exact_size); + vixl32::Label return_address; + EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address); + __ cmp(mr, Operand(0)); + // Currently the offset is always within range. If that changes, + // we shall have to split the load the same way as for fields. + DCHECK_LT(offset, kReferenceLoadMinFarOffset); + ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset(); + __ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset)); + EmitBakerReadBarrierBne(custom_data); + __ bind(&return_address); + DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(), + narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET + : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET); + } else { + // GC root loaded through a slow path for read barriers other + // than Baker's. + // /* GcRoot* */ root = obj + offset + __ Add(root_reg, obj, offset); + // /* mirror::Object* */ root = root->Read() + GenerateReadBarrierForRootSlow(instruction, root, root); + } + } else { + // Plain GC root load with no read barrier. + // /* GcRoot */ root = *(obj + offset) + GetAssembler()->LoadFromOffset(kLoadWord, root_reg, obj, offset); + // Note that GC roots are not affected by heap poisoning, thus we + // do not have to unpoison `root_reg` here. + } + MaybeGenerateMarkingRegisterCheck(/* code= */ 19); +} + +void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier( + vixl::aarch32::Register old_value, + vixl::aarch32::Register adjusted_old_value, + vixl::aarch32::Register expected) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with an ADD instead of LDR. 
+ uint32_t custom_data = EncodeBakerReadBarrierUnsafeCasData(old_value.GetCode()); + + size_t narrow_instructions = /* CMP */ (mr.IsLow() ? 1u : 0u); + size_t wide_instructions = /* ADR+CMP+ADD+BNE */ 4u - narrow_instructions; + size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes + + narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes; + ExactAssemblyScope guard(GetVIXLAssembler(), exact_size); + vixl32::Label return_address; + EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address); + __ cmp(mr, Operand(0)); + ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset(); + __ add(EncodingSize(Wide), old_value, adjusted_old_value, Operand(expected)); // Preserves flags. + EmitBakerReadBarrierBne(custom_data); + __ bind(&return_address); + DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(), + BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ADD_OFFSET); +} + +void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + vixl32::Register obj, + const vixl32::MemOperand& src, + bool needs_null_check) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the + // Marking Register) to decide whether we need to enter the slow + // path to mark the reference. Then, in the slow path, check the + // gray bit in the lock word of the reference's holder (`obj`) to + // decide whether to mark `ref` or not. + // + // We use shared thunks for the slow path; shared within the method + // for JIT, across methods for AOT. That thunk checks the holder + // and jumps to the entrypoint if needed. If the holder is not gray, + // it creates a fake dependency and returns to the LDR instruction. + // + // lr = &gray_return_address; + // if (mr) { // Thread::Current()->GetIsGcMarking() + // goto field_thunk(lr) + // } + // not_gray_return_address: + // // Original reference load. 
If the offset is too large to fit + // // into LDR, we use an adjusted base register here. + // HeapReference reference = *(obj+offset); + // gray_return_address: + + DCHECK(src.GetAddrMode() == vixl32::Offset); + DCHECK_ALIGNED(src.GetOffsetImmediate(), sizeof(mirror::HeapReference)); + vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); + bool narrow = CanEmitNarrowLdr(ref_reg, src.GetBaseRegister(), src.GetOffsetImmediate()); + + UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(ip); + uint32_t custom_data = + EncodeBakerReadBarrierFieldData(src.GetBaseRegister().GetCode(), obj.GetCode(), narrow); + + { + size_t narrow_instructions = + /* CMP */ (mr.IsLow() ? 1u : 0u) + + /* LDR+unpoison? */ (narrow ? (kPoisonHeapReferences ? 2u : 1u) : 0u); + size_t wide_instructions = + /* ADR+CMP+LDR+BNE+unpoison? */ (kPoisonHeapReferences ? 5u : 4u) - narrow_instructions; + size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes + + narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes; + ExactAssemblyScope guard(GetVIXLAssembler(), exact_size); + vixl32::Label return_address; + EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address); + __ cmp(mr, Operand(0)); + EmitBakerReadBarrierBne(custom_data); + ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset(); + __ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, src); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + // Note: We need a specific width for the unpoisoning NEG. + if (kPoisonHeapReferences) { + if (narrow) { + // The only 16-bit encoding is T1 which sets flags outside IT block (i.e. RSBS, not RSB). + __ rsbs(EncodingSize(Narrow), ref_reg, ref_reg, Operand(0)); + } else { + __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0)); + } + } + __ bind(&return_address); + DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(), + narrow ? 
BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET + : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET); + } + MaybeGenerateMarkingRegisterCheck(/* code= */ 20, /* temp_loc= */ LocationFrom(ip)); +} + +void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + vixl32::Register obj, + uint32_t offset, + Location temp, + bool needs_null_check) { + DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference)); + vixl32::Register base = obj; + if (offset >= kReferenceLoadMinFarOffset) { + base = RegisterFrom(temp); + static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2."); + __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u))); + offset &= (kReferenceLoadMinFarOffset - 1u); + } + GenerateFieldLoadWithBakerReadBarrier( + instruction, ref, obj, MemOperand(base, offset), needs_null_check); +} + +void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref, + vixl32::Register obj, + uint32_t data_offset, + Location index, + Location temp, + bool needs_null_check) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + static_assert( + sizeof(mirror::HeapReference) == sizeof(int32_t), + "art::mirror::HeapReference and int32_t have different sizes."); + ScaleFactor scale_factor = TIMES_4; + + // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the + // Marking Register) to decide whether we need to enter the slow + // path to mark the reference. Then, in the slow path, check the + // gray bit in the lock word of the reference's holder (`obj`) to + // decide whether to mark `ref` or not. + // + // We use shared thunks for the slow path; shared within the method + // for JIT, across methods for AOT. That thunk checks the holder + // and jumps to the entrypoint if needed. If the holder is not gray, + // it creates a fake dependency and returns to the LDR instruction. 
+ // + // lr = &gray_return_address; + // if (mr) { // Thread::Current()->GetIsGcMarking() + // goto array_thunk(lr) + // } + // not_gray_return_address: + // // Original reference load. If the offset is too large to fit + // // into LDR, we use an adjusted base register here. + // HeapReference reference = data[index]; + // gray_return_address: + + DCHECK(index.IsValid()); + vixl32::Register index_reg = RegisterFrom(index, DataType::Type::kInt32); + vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference); + vixl32::Register data_reg = RegisterFrom(temp, DataType::Type::kInt32); // Raw pointer. + + UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(ip); + uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode()); + + __ Add(data_reg, obj, Operand(data_offset)); + { + size_t narrow_instructions = /* CMP */ (mr.IsLow() ? 1u : 0u); + size_t wide_instructions = + /* ADR+CMP+BNE+LDR+unpoison? */ (kPoisonHeapReferences ? 5u : 4u) - narrow_instructions; + size_t exact_size = wide_instructions * vixl32::k32BitT32InstructionSizeInBytes + + narrow_instructions * vixl32::k16BitT32InstructionSizeInBytes; + ExactAssemblyScope guard(GetVIXLAssembler(), exact_size); + vixl32::Label return_address; + EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address); + __ cmp(mr, Operand(0)); + EmitBakerReadBarrierBne(custom_data); + ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset(); + __ ldr(ref_reg, MemOperand(data_reg, index_reg, vixl32::LSL, scale_factor)); + DCHECK(!needs_null_check); // The thunk cannot handle the null check. + // Note: We need a Wide NEG for the unpoisoning. 
+ if (kPoisonHeapReferences) { + __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0)); + } + __ bind(&return_address); + DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(), + BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET); + } + MaybeGenerateMarkingRegisterCheck(/* code= */ 21, /* temp_loc= */ LocationFrom(ip)); +} + +void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { + // The following condition is a compile-time one, so it does not have a run-time cost. + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) { + // The following condition is a run-time one; it is executed after the + // previous compile-time test, to avoid penalizing non-debug builds. + if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) { + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp = temp_loc.IsValid() ? RegisterFrom(temp_loc) : temps.Acquire(); + GetAssembler()->GenerateMarkingRegisterCheck(temp, + kMarkingRegisterCheckBreakCodeBaseCode + code); + } + } +} + +void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) { + DCHECK(kEmitCompilerReadBarrier); + + // Insert a slow path based read barrier *after* the reference load. + // + // If heap poisoning is enabled, the unpoisoning of the loaded + // reference will be carried out by the runtime within the slow + // path. + // + // Note that `ref` currently does not get unpoisoned (when heap + // poisoning is enabled), which is alright as the `ref` argument is + // not used by the artReadBarrierSlow entry point. + // + // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. 
+ SlowPathCodeARMVIXL* slow_path = new (GetScopedAllocator()) + ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index); + AddSlowPath(slow_path); + + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); +} + +void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) { + if (kEmitCompilerReadBarrier) { + // Baker's read barriers shall be handled by the fast path + // (CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier). + DCHECK(!kUseBakerReadBarrier); + // If heap poisoning is enabled, unpoisoning will be taken care of + // by the runtime within the slow path. + GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index); + } else if (kPoisonHeapReferences) { + GetAssembler()->UnpoisonHeapReference(RegisterFrom(out)); + } +} + +void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruction, + Location out, + Location root) { + DCHECK(kEmitCompilerReadBarrier); + + // Insert a slow path based read barrier *after* the GC root load. + // + // Note that GC roots are not affected by heap poisoning, so we do + // not need to do anything special for this here. + SlowPathCodeARMVIXL* slow_path = + new (GetScopedAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root); + AddSlowPath(slow_path); + + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); +} + +// Check if the desired_dispatch_info is supported. If it is, return it, +// otherwise return a fall-back info that should be used instead. 
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch( + const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, + ArtMethod* method ATTRIBUTE_UNUSED) { + return desired_dispatch_info; +} + +vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter( + HInvokeStaticOrDirect* invoke, vixl32::Register temp) { + DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u); + Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()); + if (!invoke->GetLocations()->Intrinsified()) { + return RegisterFrom(location); + } + // For intrinsics we allow any location, so it may be on the stack. + if (!location.IsRegister()) { + GetAssembler()->LoadFromOffset(kLoadWord, temp, sp, location.GetStackIndex()); + return temp; + } + // For register locations, check if the register was saved. If so, get it from the stack. + // Note: There is a chance that the register was saved but not overwritten, so we could + // save one load. However, since this is just an intrinsic slow path we prefer this + // simple and more robust approach rather that trying to determine if that's the case. + SlowPathCode* slow_path = GetCurrentSlowPath(); + if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(RegisterFrom(location).GetCode())) { + int stack_offset = slow_path->GetStackOffsetOfCoreRegister(RegisterFrom(location).GetCode()); + GetAssembler()->LoadFromOffset(kLoadWord, temp, sp, stack_offset); + return temp; + } + return RegisterFrom(location); +} + +void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall( + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) { + Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp. 
+ switch (invoke->GetMethodLoadKind()) { + case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: { + uint32_t offset = + GetThreadOffset(invoke->GetStringInitEntryPoint()).Int32Value(); + // temp = thread->string_init_entrypoint + GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), tr, offset); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kRecursive: + callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()); + break; + case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: { + DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()); + PcRelativePatchInfo* labels = NewBootImageMethodPatch(invoke->GetTargetMethod()); + vixl32::Register temp_reg = RegisterFrom(temp); + EmitMovwMovtPlaceholder(labels, temp_reg); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: { + uint32_t boot_image_offset = GetBootImageOffset(invoke); + PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset); + vixl32::Register temp_reg = RegisterFrom(temp); + EmitMovwMovtPlaceholder(labels, temp_reg); + GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset*/ 0); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: { + PcRelativePatchInfo* labels = NewMethodBssEntryPatch( + MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex())); + vixl32::Register temp_reg = RegisterFrom(temp); + EmitMovwMovtPlaceholder(labels, temp_reg); + // All aligned loads are implicitly atomic consume operations on ARM. 
+ GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset*/ 0); + break; + } + case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress: + __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress())); + break; + case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: { + GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path); + return; // No code pointer retrieval; the runtime performs the call directly. + } + } + + switch (invoke->GetCodePtrLocation()) { + case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf: + { + // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::k32BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + __ bl(GetFrameEntryLabel()); + RecordPcInfo(invoke, invoke->GetDexPc(), slow_path); + } + break; + case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod: + // LR = callee_method->entry_point_from_quick_compiled_code_ + GetAssembler()->LoadFromOffset( + kLoadWord, + lr, + RegisterFrom(callee_method), + ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value()); + { + // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc. + // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used. 
+ ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + // LR() + __ blx(lr); + RecordPcInfo(invoke, invoke->GetDexPc(), slow_path); + } + break; + } + + DCHECK(!IsLeafMethod()); +} + +void CodeGeneratorARMVIXL::GenerateVirtualCall( + HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) { + vixl32::Register temp = RegisterFrom(temp_location); + uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + invoke->GetVTableIndex(), kArmPointerSize).Uint32Value(); + + // Use the calling convention instead of the location of the receiver, as + // intrinsics may have put the receiver in a different register. In the intrinsics + // slow path, the arguments have been moved to the right place, so here we are + // guaranteed that the receiver is the first register of the calling convention. + InvokeDexCallingConventionARMVIXL calling_convention; + vixl32::Register receiver = calling_convention.GetRegisterAt(0); + uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + { + // Make sure the pc is recorded immediately after the `ldr` instruction. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + // /* HeapReference */ temp = receiver->klass_ + __ ldr(temp, MemOperand(receiver, class_offset)); + MaybeRecordImplicitNullCheck(invoke); + } + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. + // However this is not required in practice, as this is an + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). + GetAssembler()->MaybeUnpoisonHeapReference(temp); + + // If we're compiling baseline, update the inline cache. 
+ MaybeGenerateInlineCacheCheck(invoke, temp); + + // temp = temp->GetMethodAt(method_offset); + uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kArmPointerSize).Int32Value(); + GetAssembler()->LoadFromOffset(kLoadWord, temp, temp, method_offset); + // LR = temp->GetEntryPoint(); + GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point); + { + // Use a scope to help guarantee that `RecordPcInfo()` records the correct pc. + // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used. + ExactAssemblyScope aas(GetVIXLAssembler(), + vixl32::k16BitT32InstructionSizeInBytes, + CodeBufferCheckScope::kExactSize); + // LR(); + __ blx(lr); + RecordPcInfo(invoke, invoke->GetDexPc(), slow_path); + } +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch( + uint32_t intrinsic_data) { + return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_other_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch( + uint32_t boot_image_offset) { + return NewPcRelativePatch(/* dex_file= */ nullptr, + boot_image_offset, + &boot_image_other_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageMethodPatch( + MethodReference target_method) { + return NewPcRelativePatch( + target_method.dex_file, target_method.index, &boot_image_method_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewMethodBssEntryPatch( + MethodReference target_method) { + return NewPcRelativePatch( + target_method.dex_file, target_method.index, &method_bss_entry_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageTypePatch( + const DexFile& dex_file, dex::TypeIndex type_index) { + return NewPcRelativePatch(&dex_file, type_index.index_, &boot_image_type_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* 
CodeGeneratorARMVIXL::NewTypeBssEntryPatch( + const DexFile& dex_file, dex::TypeIndex type_index) { + return NewPcRelativePatch(&dex_file, type_index.index_, &type_bss_entry_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageStringPatch( + const DexFile& dex_file, dex::StringIndex string_index) { + return NewPcRelativePatch(&dex_file, string_index.index_, &boot_image_string_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewStringBssEntryPatch( + const DexFile& dex_file, dex::StringIndex string_index) { + return NewPcRelativePatch(&dex_file, string_index.index_, &string_bss_entry_patches_); +} + +CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePatch( + const DexFile* dex_file, uint32_t offset_or_index, ArenaDeque* patches) { + patches->emplace_back(dex_file, offset_or_index); + return &patches->back(); +} + +void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset) { + DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope. + DCHECK(!Runtime::Current()->UseJitCompilation()); + call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value()); + vixl::aarch32::Label* bl_label = &call_entrypoint_patches_.back().label; + __ bind(bl_label); + vixl32::Label placeholder_label; + __ bl(&placeholder_label); // Placeholder, patched at link-time. + __ bind(&placeholder_label); +} + +void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) { + DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope. 
+ if (Runtime::Current()->UseJitCompilation()) { + auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data); + vixl::aarch32::Label* slow_path_entry = &it->second.label; + __ b(ne, EncodingSize(Wide), slow_path_entry); + } else { + baker_read_barrier_patches_.emplace_back(custom_data); + vixl::aarch32::Label* patch_label = &baker_read_barrier_patches_.back().label; + __ bind(patch_label); + vixl32::Label placeholder_label; + __ b(ne, EncodingSize(Wide), &placeholder_label); // Placeholder, patched at link-time. + __ bind(&placeholder_label); + } +} + +VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageAddressLiteral(uint32_t address) { + return DeduplicateUint32Literal(address, &uint32_literals_); +} + +VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral( + const DexFile& dex_file, + dex::StringIndex string_index, + Handle handle) { + ReserveJitStringRoot(StringReference(&dex_file, string_index), handle); + return jit_string_patches_.GetOrCreate( + StringReference(&dex_file, string_index), + [this]() { + return GetAssembler()->CreateLiteralDestroyedWithPool(/* value= */ 0u); + }); +} + +VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file, + dex::TypeIndex type_index, + Handle handle) { + ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle); + return jit_class_patches_.GetOrCreate( + TypeReference(&dex_file, type_index), + [this]() { + return GetAssembler()->CreateLiteralDestroyedWithPool(/* value= */ 0u); + }); +} + +void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg, + uint32_t boot_image_reference) { + if (GetCompilerOptions().IsBootImage()) { + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + NewBootImageIntrinsicPatch(boot_image_reference); + EmitMovwMovtPlaceholder(labels, reg); + } else if (GetCompilerOptions().GetCompilePic()) { + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = + NewBootImageRelRoPatch(boot_image_reference); + 
EmitMovwMovtPlaceholder(labels, reg); + __ Ldr(reg, MemOperand(reg, /* offset= */ 0)); + } else { + DCHECK(Runtime::Current()->UseJitCompilation()); + gc::Heap* heap = Runtime::Current()->GetHeap(); + DCHECK(!heap->GetBootImageSpaces().empty()); + uintptr_t address = + reinterpret_cast(heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference); + __ Ldr(reg, DeduplicateBootImageAddressLiteral(dchecked_integral_cast(address))); + } +} + +void CodeGeneratorARMVIXL::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, + uint32_t boot_image_offset) { + DCHECK(invoke->IsStatic()); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + vixl32::Register argument = calling_convention.GetRegisterAt(0); + if (GetCompilerOptions().IsBootImage()) { + DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference); + // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative. + MethodReference target_method = invoke->GetTargetMethod(); + dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_; + PcRelativePatchInfo* labels = NewBootImageTypePatch(*target_method.dex_file, type_idx); + EmitMovwMovtPlaceholder(labels, argument); + } else { + LoadBootImageAddress(argument, boot_image_offset); + } + InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes(); +} + +template +inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches( + const ArenaDeque& infos, + ArenaVector* linker_patches) { + for (const PcRelativePatchInfo& info : infos) { + const DexFile* dex_file = info.target_dex_file; + size_t offset_or_index = info.offset_or_index; + DCHECK(info.add_pc_label.IsBound()); + uint32_t add_pc_offset = dchecked_integral_cast(info.add_pc_label.GetLocation()); + // Add MOVW patch. 
+ DCHECK(info.movw_label.IsBound()); + uint32_t movw_offset = dchecked_integral_cast(info.movw_label.GetLocation()); + linker_patches->push_back(Factory(movw_offset, dex_file, add_pc_offset, offset_or_index)); + // Add MOVT patch. + DCHECK(info.movt_label.IsBound()); + uint32_t movt_offset = dchecked_integral_cast(info.movt_label.GetLocation()); + linker_patches->push_back(Factory(movt_offset, dex_file, add_pc_offset, offset_or_index)); + } +} + +template +linker::LinkerPatch NoDexFileAdapter(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t boot_image_offset) { + DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null. + return Factory(literal_offset, pc_insn_offset, boot_image_offset); +} + +void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector* linker_patches) { + DCHECK(linker_patches->empty()); + size_t size = + /* MOVW+MOVT for each entry */ 2u * boot_image_method_patches_.size() + + /* MOVW+MOVT for each entry */ 2u * method_bss_entry_patches_.size() + + /* MOVW+MOVT for each entry */ 2u * boot_image_type_patches_.size() + + /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() + + /* MOVW+MOVT for each entry */ 2u * boot_image_string_patches_.size() + + /* MOVW+MOVT for each entry */ 2u * string_bss_entry_patches_.size() + + /* MOVW+MOVT for each entry */ 2u * boot_image_other_patches_.size() + + call_entrypoint_patches_.size() + + baker_read_barrier_patches_.size(); + linker_patches->reserve(size); + if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) { + EmitPcRelativeLinkerPatches( + boot_image_method_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + boot_image_type_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + boot_image_string_patches_, linker_patches); + } else { + DCHECK(boot_image_method_patches_.empty()); + DCHECK(boot_image_type_patches_.empty()); + DCHECK(boot_image_string_patches_.empty()); + } + if 
(GetCompilerOptions().IsBootImage()) { + EmitPcRelativeLinkerPatches>( + boot_image_other_patches_, linker_patches); + } else { + EmitPcRelativeLinkerPatches>( + boot_image_other_patches_, linker_patches); + } + EmitPcRelativeLinkerPatches( + method_bss_entry_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + type_bss_entry_patches_, linker_patches); + EmitPcRelativeLinkerPatches( + string_bss_entry_patches_, linker_patches); + for (const PatchInfo& info : call_entrypoint_patches_) { + DCHECK(info.target_dex_file == nullptr); + linker_patches->push_back(linker::LinkerPatch::CallEntrypointPatch( + info.label.GetLocation(), info.offset_or_index)); + } + for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) { + linker_patches->push_back(linker::LinkerPatch::BakerReadBarrierBranchPatch( + info.label.GetLocation(), info.custom_data)); + } + DCHECK_EQ(size, linker_patches->size()); +} + +bool CodeGeneratorARMVIXL::NeedsThunkCode(const linker::LinkerPatch& patch) const { + return patch.GetType() == linker::LinkerPatch::Type::kCallEntrypoint || + patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch || + patch.GetType() == linker::LinkerPatch::Type::kCallRelative; +} + +void CodeGeneratorARMVIXL::EmitThunkCode(const linker::LinkerPatch& patch, + /*out*/ ArenaVector* code, + /*out*/ std::string* debug_name) { + arm::ArmVIXLAssembler assembler(GetGraph()->GetAllocator()); + switch (patch.GetType()) { + case linker::LinkerPatch::Type::kCallRelative: { + // The thunk just uses the entry point in the ArtMethod. This works even for calls + // to the generic JNI and interpreter trampolines. 
+ MemberOffset offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize); + assembler.LoadFromOffset(arm::kLoadWord, vixl32::pc, vixl32::r0, offset.Int32Value()); + assembler.GetVIXLAssembler()->Bkpt(0); + if (GetCompilerOptions().GenerateAnyDebugInfo()) { + *debug_name = "MethodCallThunk"; + } + break; + } + case linker::LinkerPatch::Type::kCallEntrypoint: { + assembler.LoadFromOffset(arm::kLoadWord, vixl32::pc, tr, patch.EntrypointOffset()); + assembler.GetVIXLAssembler()->Bkpt(0); + if (GetCompilerOptions().GenerateAnyDebugInfo()) { + *debug_name = "EntrypointCallThunk_" + std::to_string(patch.EntrypointOffset()); + } + break; + } + case linker::LinkerPatch::Type::kBakerReadBarrierBranch: { + DCHECK_EQ(patch.GetBakerCustomValue2(), 0u); + CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name); + break; + } + default: + LOG(FATAL) << "Unexpected patch type " << patch.GetType(); + UNREACHABLE(); + } + + // Ensure we emit the literal pool if any. 
+ assembler.FinalizeCode(); + code->resize(assembler.CodeSize()); + MemoryRegion code_region(code->data(), code->size()); + assembler.FinalizeInstructions(code_region); +} + +VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal( + uint32_t value, + Uint32ToLiteralMap* map) { + return map->GetOrCreate( + value, + [this, value]() { + return GetAssembler()->CreateLiteralDestroyedWithPool(/* value= */ value); + }); +} + +void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall); + locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex, + Location::RequiresRegister()); + locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister()); + locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) { + vixl32::Register res = OutputRegister(instr); + vixl32::Register accumulator = + InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex); + vixl32::Register mul_left = + InputRegisterAt(instr, HMultiplyAccumulate::kInputMulLeftIndex); + vixl32::Register mul_right = + InputRegisterAt(instr, HMultiplyAccumulate::kInputMulRightIndex); + + if (instr->GetOpKind() == HInstruction::kAdd) { + __ Mla(res, mul_left, mul_right, accumulator); + } else { + __ Mls(res, mul_left, mul_right, accumulator); + } +} + +void LocationsBuilderARMVIXL::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, this should be removed during prepare for register allocator. 
+ LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorARMVIXL::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, this should be removed during prepare for register allocator. + LOG(FATAL) << "Unreachable"; +} + +// Simple implementation of packed switch - generate cascaded compare/jumps. +void LocationsBuilderARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold && + codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) { + locations->AddTemp(Location::RequiresRegister()); // We need a temp for the table base. + if (switch_instr->GetStartValue() != 0) { + locations->AddTemp(Location::RequiresRegister()); // We need a temp for the bias. + } + } +} + +// TODO(VIXL): Investigate and reach the parity with old arm codegen. +void InstructionCodeGeneratorARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) { + int32_t lower_bound = switch_instr->GetStartValue(); + uint32_t num_entries = switch_instr->GetNumEntries(); + LocationSummary* locations = switch_instr->GetLocations(); + vixl32::Register value_reg = InputRegisterAt(switch_instr, 0); + HBasicBlock* default_block = switch_instr->GetDefaultBlock(); + + if (num_entries <= kPackedSwitchCompareJumpThreshold || + !codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) { + // Create a series of compare/jumps. + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register temp_reg = temps.Acquire(); + // Note: It is fine for the below AddConstantSetFlags() using IP register to temporarily store + // the immediate, because IP is used as the destination register. 
For the other + // AddConstantSetFlags() and GenerateCompareWithImmediate(), the immediate values are constant, + // and they can be encoded in the instruction without making use of IP register. + __ Adds(temp_reg, value_reg, -lower_bound); + + const ArenaVector& successors = switch_instr->GetBlock()->GetSuccessors(); + // Jump to successors[0] if value == lower_bound. + __ B(eq, codegen_->GetLabelOf(successors[0])); + int32_t last_index = 0; + for (; num_entries - last_index > 2; last_index += 2) { + __ Adds(temp_reg, temp_reg, -2); + // Jump to successors[last_index + 1] if value < case_value[last_index + 2]. + __ B(lo, codegen_->GetLabelOf(successors[last_index + 1])); + // Jump to successors[last_index + 2] if value == case_value[last_index + 2]. + __ B(eq, codegen_->GetLabelOf(successors[last_index + 2])); + } + if (num_entries - last_index == 2) { + // The last missing case_value. + __ Cmp(temp_reg, 1); + __ B(eq, codegen_->GetLabelOf(successors[last_index + 1])); + } + + // And the default for any other value. + if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) { + __ B(codegen_->GetLabelOf(default_block)); + } + } else { + // Create a table lookup. + vixl32::Register table_base = RegisterFrom(locations->GetTemp(0)); + + JumpTableARMVIXL* jump_table = codegen_->CreateJumpTable(switch_instr); + + // Remove the bias. + vixl32::Register key_reg; + if (lower_bound != 0) { + key_reg = RegisterFrom(locations->GetTemp(1)); + __ Sub(key_reg, value_reg, lower_bound); + } else { + key_reg = value_reg; + } + + // Check whether the value is in the table, jump to default block if not. + __ Cmp(key_reg, num_entries - 1); + __ B(hi, codegen_->GetLabelOf(default_block)); + + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register jump_offset = temps.Acquire(); + + // Load jump offset from the table. 
+ { + const size_t jump_size = switch_instr->GetNumEntries() * sizeof(int32_t); + ExactAssemblyScope aas(GetVIXLAssembler(), + (vixl32::kMaxInstructionSizeInBytes * 4) + jump_size, + CodeBufferCheckScope::kMaximumSize); + __ adr(table_base, jump_table->GetTableStartLabel()); + __ ldr(jump_offset, MemOperand(table_base, key_reg, vixl32::LSL, 2)); + + // Jump to target block by branching to table_base(pc related) + offset. + vixl32::Register target_address = table_base; + __ add(target_address, table_base, jump_offset); + __ bx(target_address); + + jump_table->EmitTable(codegen_); + } + } +} + +// Copy the result of a call into the given target. +void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, DataType::Type type) { + if (!trg.IsValid()) { + DCHECK_EQ(type, DataType::Type::kVoid); + return; + } + + DCHECK_NE(type, DataType::Type::kVoid); + + Location return_loc = InvokeDexCallingConventionVisitorARMVIXL().GetReturnLocation(type); + if (return_loc.Equals(trg)) { + return; + } + + // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged + // with the last branch. + if (type == DataType::Type::kInt64) { + TODO_VIXL32(FATAL); + } else if (type == DataType::Type::kFloat64) { + TODO_VIXL32(FATAL); + } else { + // Let the parallel move resolver take care of all of this. 
+ HParallelMove parallel_move(GetGraph()->GetAllocator()); + parallel_move.AddMove(return_loc, trg, type, nullptr); + GetMoveResolver()->EmitNativeCode(¶llel_move); + } +} + +void LocationsBuilderARMVIXL::VisitClassTableGet(HClassTableGet* instruction) { + LocationSummary* locations = + new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARMVIXL::VisitClassTableGet(HClassTableGet* instruction) { + if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) { + uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + instruction->GetIndex(), kArmPointerSize).SizeValue(); + GetAssembler()->LoadFromOffset(kLoadWord, + OutputRegister(instruction), + InputRegisterAt(instruction, 0), + method_offset); + } else { + uint32_t method_offset = static_cast(ImTable::OffsetOfElement( + instruction->GetIndex(), kArmPointerSize)); + GetAssembler()->LoadFromOffset(kLoadWord, + OutputRegister(instruction), + InputRegisterAt(instruction, 0), + mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value()); + GetAssembler()->LoadFromOffset(kLoadWord, + OutputRegister(instruction), + OutputRegister(instruction), + method_offset); + } +} + +static void PatchJitRootUse(uint8_t* code, + const uint8_t* roots_data, + VIXLUInt32Literal* literal, + uint64_t index_in_table) { + DCHECK(literal->IsBound()); + uint32_t literal_offset = literal->GetLocation(); + uintptr_t address = + reinterpret_cast(roots_data) + index_in_table * sizeof(GcRoot); + uint8_t* data = code + literal_offset; + reinterpret_cast(data)[0] = dchecked_integral_cast(address); +} + +void CodeGeneratorARMVIXL::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) { + for (const auto& entry : jit_string_patches_) { + const StringReference& string_reference = entry.first; + VIXLUInt32Literal* table_entry_literal = entry.second; 
+ uint64_t index_in_table = GetJitStringRootIndex(string_reference); + PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); + } + for (const auto& entry : jit_class_patches_) { + const TypeReference& type_reference = entry.first; + VIXLUInt32Literal* table_entry_literal = entry.second; + uint64_t index_in_table = GetJitClassRootIndex(type_reference); + PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table); + } +} + +void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder( + CodeGeneratorARMVIXL::PcRelativePatchInfo* labels, + vixl32::Register out) { + ExactAssemblyScope aas(GetVIXLAssembler(), + 3 * vixl32::kMaxInstructionSizeInBytes, + CodeBufferCheckScope::kMaximumSize); + // TODO(VIXL): Think about using mov instead of movw. + __ bind(&labels->movw_label); + __ movw(out, /* operand= */ 0u); + __ bind(&labels->movt_label); + __ movt(out, /* operand= */ 0u); + __ bind(&labels->add_pc_label); + __ add(out, out, pc); +} + +#undef __ +#undef QUICK_ENTRY_POINT +#undef TODO_VIXL32 + +#define __ assembler.GetVIXLAssembler()-> + +static void EmitGrayCheckAndFastPath(ArmVIXLAssembler& assembler, + vixl32::Register base_reg, + vixl32::MemOperand& lock_word, + vixl32::Label* slow_path, + int32_t raw_ldr_offset, + vixl32::Label* throw_npe = nullptr) { + // Load the lock word containing the rb_state. + __ Ldr(ip, lock_word); + // Given the numeric representation, it's enough to check the low bit of the rb_state. + static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0"); + static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); + __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted)); + __ B(ne, slow_path, /* is_far_target= */ false); + // To throw NPE, we return to the fast path; the artificial dependence below does not matter. 
+ if (throw_npe != nullptr) { + __ Bind(throw_npe); + } + __ Add(lr, lr, raw_ldr_offset); + // Introduce a dependency on the lock_word including rb_state, + // to prevent load-load reordering, and without using + // a memory barrier (which would be more expensive). + __ Add(base_reg, base_reg, Operand(ip, LSR, 32)); + __ Bx(lr); // And return back to the function. + // Note: The fake dependency is unnecessary for the slow path. +} + +// Load the read barrier introspection entrypoint in register `entrypoint` +static vixl32::Register LoadReadBarrierMarkIntrospectionEntrypoint(ArmVIXLAssembler& assembler) { + // The register where the read barrier introspection entrypoint is loaded + // is the marking register. We clobber it here and the entrypoint restores it to 1. + vixl32::Register entrypoint = mr; + // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection. + DCHECK_EQ(ip.GetCode(), 12u); + const int32_t entry_point_offset = + Thread::ReadBarrierMarkEntryPointsOffset(ip.GetCode()); + __ Ldr(entrypoint, MemOperand(tr, entry_point_offset)); + return entrypoint; +} + +void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler, + uint32_t encoded_data, + /*out*/ std::string* debug_name) { + BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); + switch (kind) { + case BakerReadBarrierKind::kField: { + vixl32::Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + vixl32::Register holder_reg(BakerReadBarrierSecondRegField::Decode(encoded_data)); + CheckValidReg(holder_reg.GetCode()); + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip); + // In the case of a field load, if `base_reg` differs from + // `holder_reg`, the offset was too large and we must have emitted (during the construction + // of the HIR graph, see 
`art::HInstructionBuilder::BuildInstanceFieldAccess`) and preserved + // (see `art::PrepareForRegisterAllocation::VisitNullCheck`) an explicit null check before + // the load. Otherwise, for implicit null checks, we need to null-check the holder as we do + // not necessarily do that check before going to the thunk. + vixl32::Label throw_npe_label; + vixl32::Label* throw_npe = nullptr; + if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) { + throw_npe = &throw_npe_label; + __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target= */ false); + } + // Check if the holder is gray and, if not, add fake dependency to the base register + // and return to the LDR instruction to load the reference. Otherwise, use introspection + // to load the reference and call the entrypoint that performs further checks on the + // reference and marks it if needed. + vixl32::Label slow_path; + MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value()); + const int32_t raw_ldr_offset = (width == BakerReadBarrierWidth::kWide) + ? BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET + : BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET; + EmitGrayCheckAndFastPath( + assembler, base_reg, lock_word, &slow_path, raw_ldr_offset, throw_npe); + __ Bind(&slow_path); + const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 + + raw_ldr_offset; + vixl32::Register ep_reg = LoadReadBarrierMarkIntrospectionEntrypoint(assembler); + if (width == BakerReadBarrierWidth::kWide) { + MemOperand ldr_half_address(lr, ldr_offset + 2); + __ Ldrh(ip, ldr_half_address); // Load the LDR immediate half-word with "Rt | imm12". + __ Ubfx(ip, ip, 0, 12); // Extract the offset imm12. + __ Ldr(ip, MemOperand(base_reg, ip)); // Load the reference. + } else { + MemOperand ldr_address(lr, ldr_offset); + __ Ldrh(ip, ldr_address); // Load the LDR immediate, encoding T1. 
+ __ Add(ep_reg, // Adjust the entrypoint address to the entrypoint + ep_reg, // for narrow LDR. + Operand(BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET)); + __ Ubfx(ip, ip, 6, 5); // Extract the imm5, i.e. offset / 4. + __ Ldr(ip, MemOperand(base_reg, ip, LSL, 2)); // Load the reference. + } + // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference. + __ Bx(ep_reg); // Jump to the entrypoint. + break; + } + case BakerReadBarrierKind::kArray: { + vixl32::Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(base_reg.GetCode()); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip); + vixl32::Label slow_path; + int32_t data_offset = + mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value(); + MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset); + DCHECK_LT(lock_word.GetOffsetImmediate(), 0); + const int32_t raw_ldr_offset = BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET; + EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset); + __ Bind(&slow_path); + const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 + + raw_ldr_offset; + MemOperand ldr_address(lr, ldr_offset + 2); + __ Ldrb(ip, ldr_address); // Load the LDR (register) byte with "00 | imm2 | Rm", + // i.e. Rm+32 because the scale in imm2 is 2. + vixl32::Register ep_reg = LoadReadBarrierMarkIntrospectionEntrypoint(assembler); + __ Bfi(ep_reg, ip, 3, 6); // Insert ip to the entrypoint address to create + // a switch case target based on the index register. + __ Mov(ip, base_reg); // Move the base register to ip0. + __ Bx(ep_reg); // Jump to the entrypoint's array switch case. 
+ break; + } + case BakerReadBarrierKind::kGcRoot: + case BakerReadBarrierKind::kUnsafeCas: { + // Check if the reference needs to be marked and if so (i.e. not null, not marked yet + // and it does not have a forwarding address), call the correct introspection entrypoint; + // otherwise return the reference (or the extracted forwarding address). + // There is no gray bit check for GC roots. + vixl32::Register root_reg(BakerReadBarrierFirstRegField::Decode(encoded_data)); + CheckValidReg(root_reg.GetCode()); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); + UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); + temps.Exclude(ip); + vixl32::Label return_label, not_marked, forwarding_address; + __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target= */ false); + MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value()); + __ Ldr(ip, lock_word); + __ Tst(ip, LockWord::kMarkBitStateMaskShifted); + __ B(eq, ¬_marked); + __ Bind(&return_label); + __ Bx(lr); + __ Bind(¬_marked); + static_assert(LockWord::kStateShift == 30 && LockWord::kStateForwardingAddress == 3, + "To use 'CMP ip, #modified-immediate; BHS', we need the lock word state in " + " the highest bits and the 'forwarding address' state to have all bits set"); + __ Cmp(ip, Operand(0xc0000000)); + __ B(hs, &forwarding_address); + vixl32::Register ep_reg = LoadReadBarrierMarkIntrospectionEntrypoint(assembler); + // Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister + // to one of art_quick_read_barrier_mark_introspection_{gc_roots_{wide,narrow},unsafe_cas}. + DCHECK(kind != BakerReadBarrierKind::kUnsafeCas || width == BakerReadBarrierWidth::kWide); + int32_t entrypoint_offset = + (kind == BakerReadBarrierKind::kGcRoot) + ? (width == BakerReadBarrierWidth::kWide) + ? 
BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET + : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET + : BAKER_MARK_INTROSPECTION_UNSAFE_CAS_ENTRYPOINT_OFFSET; + __ Add(ep_reg, ep_reg, Operand(entrypoint_offset)); + __ Mov(ip, root_reg); + __ Bx(ep_reg); + __ Bind(&forwarding_address); + __ Lsl(root_reg, ip, LockWord::kForwardingAddressShift); + __ Bx(lr); + break; + } + default: + LOG(FATAL) << "Unexpected kind: " << static_cast(kind); + UNREACHABLE(); + } + + // For JIT, the slow path is considered part of the compiled method, + // so JIT should pass null as `debug_name`. Tests may not have a runtime. + DCHECK(Runtime::Current() == nullptr || + !Runtime::Current()->UseJitCompilation() || + debug_name == nullptr); + if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) { + std::ostringstream oss; + oss << "BakerReadBarrierThunk"; + switch (kind) { + case BakerReadBarrierKind::kField: + oss << "Field"; + if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) { + oss << "Wide"; + } + oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data) + << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data); + break; + case BakerReadBarrierKind::kArray: + oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide); + break; + case BakerReadBarrierKind::kGcRoot: + oss << "GcRoot"; + if (BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide) { + oss << "Wide"; + } + oss << "_r" << BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + break; + case BakerReadBarrierKind::kUnsafeCas: + oss << "UnsafeCas_r" << 
BakerReadBarrierFirstRegField::Decode(encoded_data); + DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg, + BakerReadBarrierSecondRegField::Decode(encoded_data)); + DCHECK(BakerReadBarrierWidthField::Decode(encoded_data) == BakerReadBarrierWidth::kWide); + break; + } + *debug_name = oss.str(); + } +} + +#undef __ + +} // namespace arm +} // namespace art diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h new file mode 100644 index 0000000..48fb082 --- /dev/null +++ b/compiler/optimizing/code_generator_arm_vixl.h @@ -0,0 +1,934 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_ +#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_ + +#include "base/enums.h" +#include "code_generator.h" +#include "common_arm.h" +#include "dex/string_reference.h" +#include "dex/type_reference.h" +#include "driver/compiler_options.h" +#include "nodes.h" +#include "parallel_move_resolver.h" +#include "utils/arm/assembler_arm_vixl.h" + +// TODO(VIXL): make vixl clean wrt -Wshadow. 
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" +#include "aarch32/constants-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/macro-assembler-aarch32.h" +#pragma GCC diagnostic pop + +namespace art { + +namespace linker { +class Thumb2RelativePatcherTest; +} // namespace linker + +namespace arm { + +// This constant is used as an approximate margin when emission of veneer and literal pools +// must be blocked. +static constexpr int kMaxMacroInstructionSizeInBytes = + 15 * vixl::aarch32::kMaxInstructionSizeInBytes; + +static const vixl::aarch32::Register kParameterCoreRegistersVIXL[] = { + vixl::aarch32::r1, + vixl::aarch32::r2, + vixl::aarch32::r3 +}; +static const size_t kParameterCoreRegistersLengthVIXL = arraysize(kParameterCoreRegistersVIXL); +static const vixl::aarch32::SRegister kParameterFpuRegistersVIXL[] = { + vixl::aarch32::s0, + vixl::aarch32::s1, + vixl::aarch32::s2, + vixl::aarch32::s3, + vixl::aarch32::s4, + vixl::aarch32::s5, + vixl::aarch32::s6, + vixl::aarch32::s7, + vixl::aarch32::s8, + vixl::aarch32::s9, + vixl::aarch32::s10, + vixl::aarch32::s11, + vixl::aarch32::s12, + vixl::aarch32::s13, + vixl::aarch32::s14, + vixl::aarch32::s15 +}; +static const size_t kParameterFpuRegistersLengthVIXL = arraysize(kParameterFpuRegistersVIXL); + +static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0; + +// Callee saves core registers r5, r6, r7, r8 (except when emitting Baker +// read barriers, where it is used as Marking Register), r10, r11, and lr. +static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::RegisterList::Union( + vixl::aarch32::RegisterList(vixl::aarch32::r5, + vixl::aarch32::r6, + vixl::aarch32::r7), + // Do not consider r8 as a callee-save register with Baker read barriers. + ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) + ? 
vixl::aarch32::RegisterList() + : vixl::aarch32::RegisterList(vixl::aarch32::r8)), + vixl::aarch32::RegisterList(vixl::aarch32::r10, + vixl::aarch32::r11, + vixl::aarch32::lr)); + +// Callee saves FP registers s16 to s31 inclusive. +static const vixl::aarch32::SRegisterList kFpuCalleeSaves = + vixl::aarch32::SRegisterList(vixl::aarch32::s16, 16); + +static const vixl::aarch32::Register kRuntimeParameterCoreRegistersVIXL[] = { + vixl::aarch32::r0, + vixl::aarch32::r1, + vixl::aarch32::r2, + vixl::aarch32::r3 +}; +static const size_t kRuntimeParameterCoreRegistersLengthVIXL = + arraysize(kRuntimeParameterCoreRegistersVIXL); +static const vixl::aarch32::SRegister kRuntimeParameterFpuRegistersVIXL[] = { + vixl::aarch32::s0, + vixl::aarch32::s1, + vixl::aarch32::s2, + vixl::aarch32::s3 +}; +static const size_t kRuntimeParameterFpuRegistersLengthVIXL = + arraysize(kRuntimeParameterFpuRegistersVIXL); + +class LoadClassSlowPathARMVIXL; +class CodeGeneratorARMVIXL; + +using VIXLInt32Literal = vixl::aarch32::Literal; +using VIXLUInt32Literal = vixl::aarch32::Literal; + +class JumpTableARMVIXL : public DeletableArenaObject { + public: + explicit JumpTableARMVIXL(HPackedSwitch* switch_instr) + : switch_instr_(switch_instr), + table_start_(), + bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { + uint32_t num_entries = switch_instr_->GetNumEntries(); + for (uint32_t i = 0; i < num_entries; i++) { + VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced); + bb_addresses_.emplace_back(lit); + } + } + + vixl::aarch32::Label* GetTableStartLabel() { return &table_start_; } + + void EmitTable(CodeGeneratorARMVIXL* codegen); + void FixTable(CodeGeneratorARMVIXL* codegen); + + private: + HPackedSwitch* const switch_instr_; + vixl::aarch32::Label table_start_; + ArenaVector> bb_addresses_; + + DISALLOW_COPY_AND_ASSIGN(JumpTableARMVIXL); +}; + +class InvokeRuntimeCallingConventionARMVIXL + : public CallingConvention { + public: + 
InvokeRuntimeCallingConventionARMVIXL() + : CallingConvention(kRuntimeParameterCoreRegistersVIXL, + kRuntimeParameterCoreRegistersLengthVIXL, + kRuntimeParameterFpuRegistersVIXL, + kRuntimeParameterFpuRegistersLengthVIXL, + kArmPointerSize) {} + + private: + DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConventionARMVIXL); +}; + +class InvokeDexCallingConventionARMVIXL + : public CallingConvention { + public: + InvokeDexCallingConventionARMVIXL() + : CallingConvention(kParameterCoreRegistersVIXL, + kParameterCoreRegistersLengthVIXL, + kParameterFpuRegistersVIXL, + kParameterFpuRegistersLengthVIXL, + kArmPointerSize) {} + + private: + DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionARMVIXL); +}; + +class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor { + public: + InvokeDexCallingConventionVisitorARMVIXL() {} + virtual ~InvokeDexCallingConventionVisitorARMVIXL() {} + + Location GetNextLocation(DataType::Type type) override; + Location GetReturnLocation(DataType::Type type) const override; + Location GetMethodLocation() const override; + + private: + InvokeDexCallingConventionARMVIXL calling_convention; + uint32_t double_index_ = 0; + + DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARMVIXL); +}; + +class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention { + public: + FieldAccessCallingConventionARMVIXL() {} + + Location GetObjectLocation() const override { + return helpers::LocationFrom(vixl::aarch32::r1); + } + Location GetFieldIndexLocation() const override { + return helpers::LocationFrom(vixl::aarch32::r0); + } + Location GetReturnLocation(DataType::Type type) const override { + return DataType::Is64BitType(type) + ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1) + : helpers::LocationFrom(vixl::aarch32::r0); + } + Location GetSetValueLocation(DataType::Type type, bool is_instance) const override { + return DataType::Is64BitType(type) + ? 
helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3) + : (is_instance + ? helpers::LocationFrom(vixl::aarch32::r2) + : helpers::LocationFrom(vixl::aarch32::r1)); + } + Location GetFpuLocation(DataType::Type type) const override { + return DataType::Is64BitType(type) + ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1) + : helpers::LocationFrom(vixl::aarch32::s0); + } + + private: + DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARMVIXL); +}; + +class SlowPathCodeARMVIXL : public SlowPathCode { + public: + explicit SlowPathCodeARMVIXL(HInstruction* instruction) + : SlowPathCode(instruction), entry_label_(), exit_label_() {} + + vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; } + vixl::aarch32::Label* GetExitLabel() { return &exit_label_; } + + void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override; + void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) override; + + private: + vixl::aarch32::Label entry_label_; + vixl::aarch32::Label exit_label_; + + DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARMVIXL); +}; + +class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap { + public: + ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen) + : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {} + + void EmitMove(size_t index) override; + void EmitSwap(size_t index) override; + void SpillScratch(int reg) override; + void RestoreScratch(int reg) override; + + ArmVIXLAssembler* GetAssembler() const; + + private: + void Exchange(vixl32::Register reg, int mem); + void Exchange(int mem1, int mem2); + + CodeGeneratorARMVIXL* const codegen_; + + DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARMVIXL); +}; + +class LocationsBuilderARMVIXL : public HGraphVisitor { + public: + LocationsBuilderARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen) + : HGraphVisitor(graph), codegen_(codegen) {} + +#define DECLARE_VISIT_INSTRUCTION(name, super) \ + 
void Visit##name(H##name* instr) override; + + FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION) + +#undef DECLARE_VISIT_INSTRUCTION + + void VisitInstruction(HInstruction* instruction) override { + LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() + << " (id " << instruction->GetId() << ")"; + } + + private: + void HandleInvoke(HInvoke* invoke); + void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode); + void HandleCondition(HCondition* condition); + void HandleIntegerRotate(LocationSummary* locations); + void HandleLongRotate(LocationSummary* locations); + void HandleShift(HBinaryOperation* operation); + void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info); + void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info); + + Location ArithmeticZeroOrFpuRegister(HInstruction* input); + Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode); + bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode); + + CodeGeneratorARMVIXL* const codegen_; + InvokeDexCallingConventionVisitorARMVIXL parameter_visitor_; + + DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARMVIXL); +}; + +class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator { + public: + InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen); + +#define DECLARE_VISIT_INSTRUCTION(name, super) \ + void Visit##name(H##name* instr) override; + + FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION) + FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION) + +#undef DECLARE_VISIT_INSTRUCTION + + void VisitInstruction(HInstruction* instruction) override { + LOG(FATAL) << "Unreachable instruction " << instruction->DebugName() + << " (id " << 
instruction->GetId() << ")"; + } + + ArmVIXLAssembler* GetAssembler() const { return assembler_; } + ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); } + + private: + // Generate code for the given suspend check. If not null, `successor` + // is the block to branch to if the suspend check is not needed, and after + // the suspend call. + void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor); + void GenerateClassInitializationCheck(LoadClassSlowPathARMVIXL* slow_path, + vixl32::Register class_reg); + void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, + vixl::aarch32::Register temp, + vixl::aarch32::FlagsUpdate flags_update); + void GenerateAndConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value); + void GenerateOrrConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value); + void GenerateEorConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value); + void GenerateAddLongConst(Location out, Location first, uint64_t value); + void HandleBitwiseOperation(HBinaryOperation* operation); + void HandleCondition(HCondition* condition); + void HandleIntegerRotate(HRor* ror); + void HandleLongRotate(HRor* ror); + void HandleShift(HBinaryOperation* operation); + + void GenerateWideAtomicStore(vixl::aarch32::Register addr, + uint32_t offset, + vixl::aarch32::Register value_lo, + vixl::aarch32::Register value_hi, + vixl::aarch32::Register temp1, + vixl::aarch32::Register temp2, + HInstruction* instruction); + void GenerateWideAtomicLoad(vixl::aarch32::Register addr, + uint32_t offset, + vixl::aarch32::Register out_lo, + vixl::aarch32::Register out_hi); + + void HandleFieldSet(HInstruction* instruction, + const FieldInfo& field_info, + bool value_can_be_null); + void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info); + + void GenerateMinMaxInt(LocationSummary* locations, bool is_min); + void 
GenerateMinMaxLong(LocationSummary* locations, bool is_min); + void GenerateMinMaxFloat(HInstruction* minmax, bool is_min); + void GenerateMinMaxDouble(HInstruction* minmax, bool is_min); + void GenerateMinMax(HBinaryOperation* minmax, bool is_min); + + // Generate a heap reference load using one register `out`: + // + // out <- *(out + offset) + // + // while honoring heap poisoning and/or read barriers (if any). + // + // Location `maybe_temp` is used when generating a read barrier and + // shall be a register in that case; it may be an invalid location + // otherwise. + void GenerateReferenceLoadOneRegister(HInstruction* instruction, + Location out, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option); + // Generate a heap reference load using two different registers + // `out` and `obj`: + // + // out <- *(obj + offset) + // + // while honoring heap poisoning and/or read barriers (if any). + // + // Location `maybe_temp` is used when generating a Baker's (fast + // path) read barrier and shall be a register in that case; it may + // be an invalid location otherwise. 
+ void GenerateReferenceLoadTwoRegisters(HInstruction* instruction, + Location out, + Location obj, + uint32_t offset, + Location maybe_temp, + ReadBarrierOption read_barrier_option); + void GenerateTestAndBranch(HInstruction* instruction, + size_t condition_input_index, + vixl::aarch32::Label* true_target, + vixl::aarch32::Label* false_target, + bool far_target = true); + void GenerateCompareTestAndBranch(HCondition* condition, + vixl::aarch32::Label* true_target, + vixl::aarch32::Label* false_target, + bool is_far_target = true); + void DivRemOneOrMinusOne(HBinaryOperation* instruction); + void DivRemByPowerOfTwo(HBinaryOperation* instruction); + void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction); + void GenerateDivRemConstantIntegral(HBinaryOperation* instruction); + void HandleGoto(HInstruction* got, HBasicBlock* successor); + + vixl::aarch32::MemOperand VecAddress( + HVecMemoryOperation* instruction, + // This function may acquire a scratch register. + vixl::aarch32::UseScratchRegisterScope* temps_scope, + /*out*/ vixl32::Register* scratch); + vixl::aarch32::AlignedMemOperand VecAddressUnaligned( + HVecMemoryOperation* instruction, + // This function may acquire a scratch register. 
+ vixl::aarch32::UseScratchRegisterScope* temps_scope, + /*out*/ vixl32::Register* scratch); + + ArmVIXLAssembler* const assembler_; + CodeGeneratorARMVIXL* const codegen_; + + DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARMVIXL); +}; + +class CodeGeneratorARMVIXL : public CodeGenerator { + public: + CodeGeneratorARMVIXL(HGraph* graph, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats = nullptr); + virtual ~CodeGeneratorARMVIXL() {} + + void GenerateFrameEntry() override; + void GenerateFrameExit() override; + void Bind(HBasicBlock* block) override; + void MoveConstant(Location destination, int32_t value) override; + void MoveLocation(Location dst, Location src, DataType::Type dst_type) override; + void AddLocationAsTemp(Location location, LocationSummary* locations) override; + + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override; + + size_t GetWordSize() const override { + return static_cast(kArmPointerSize); + } + + size_t GetCalleePreservedFPWidth() const override { + return vixl::aarch32::kSRegSizeInBytes; + } + + HGraphVisitor* GetLocationBuilder() override { return &location_builder_; } + + HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; } + + ArmVIXLAssembler* GetAssembler() override { return &assembler_; } + + const ArmVIXLAssembler& GetAssembler() const override { return assembler_; } + + ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); } + + uintptr_t GetAddressOf(HBasicBlock* block) override { + vixl::aarch32::Label* block_entry_label = GetLabelOf(block); + DCHECK(block_entry_label->IsBound()); + return block_entry_label->GetLocation(); + } + + void FixJumpTables(); + void SetupBlockedRegisters() 
const override; + + void DumpCoreRegister(std::ostream& stream, int reg) const override; + void DumpFloatingPointRegister(std::ostream& stream, int reg) const override; + + ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; } + InstructionSet GetInstructionSet() const override { return InstructionSet::kThumb2; } + + const ArmInstructionSetFeatures& GetInstructionSetFeatures() const; + + // Helper method to move a 32-bit value between two locations. + void Move32(Location destination, Location source); + + void LoadFromShiftedRegOffset(DataType::Type type, + Location out_loc, + vixl::aarch32::Register base, + vixl::aarch32::Register reg_index, + vixl::aarch32::Condition cond = vixl::aarch32::al); + void StoreToShiftedRegOffset(DataType::Type type, + Location out_loc, + vixl::aarch32::Register base, + vixl::aarch32::Register reg_index, + vixl::aarch32::Condition cond = vixl::aarch32::al); + + // Generate code to invoke a runtime entry point. + void InvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path = nullptr) override; + + // Generate code to invoke a runtime entry point, but do not record + // PC-related information in a stack map. + void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset, + HInstruction* instruction, + SlowPathCode* slow_path); + + // Emit a write barrier. 
+ void MarkGCCard(vixl::aarch32::Register temp, + vixl::aarch32::Register card, + vixl::aarch32::Register object, + vixl::aarch32::Register value, + bool can_be_null); + + void GenerateMemoryBarrier(MemBarrierKind kind); + + vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) { + block = FirstNonEmptyBlock(block); + return &(block_labels_[block->GetBlockId()]); + } + + vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label); + + void Initialize() override { + block_labels_.resize(GetGraph()->GetBlocks().size()); + } + + void Finalize(CodeAllocator* allocator) override; + + bool NeedsTwoRegisters(DataType::Type type) const override { + return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64; + } + + void ComputeSpillMask() override; + + vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; } + + // Check if the desired_string_load_kind is supported. If it is, return it, + // otherwise return a fall-back kind that should be used instead. + HLoadString::LoadKind GetSupportedLoadStringKind( + HLoadString::LoadKind desired_string_load_kind) override; + + // Check if the desired_class_load_kind is supported. If it is, return it, + // otherwise return a fall-back kind that should be used instead. + HLoadClass::LoadKind GetSupportedLoadClassKind( + HLoadClass::LoadKind desired_class_load_kind) override; + + // Check if the desired_dispatch_info is supported. If it is, return it, + // otherwise return a fall-back info that should be used instead. 
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch( + const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info, + ArtMethod* method) override; + + void GenerateStaticOrDirectCall( + HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; + void GenerateVirtualCall( + HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override; + + void MoveFromReturnRegister(Location trg, DataType::Type type) override; + + // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types, + // whether through .data.bimg.rel.ro, .bss, or directly in the boot image. + // + // The PC-relative address is loaded with three instructions, + // MOVW+MOVT to load the offset to base_reg and then ADD base_reg, PC. The offset + // is calculated from the ADD's effective PC, i.e. PC+4 on Thumb2. Though we + // currently emit these 3 instructions together, instruction scheduling could + // split this sequence apart, so we keep separate labels for each of them. + struct PcRelativePatchInfo { + PcRelativePatchInfo(const DexFile* dex_file, uint32_t off_or_idx) + : target_dex_file(dex_file), offset_or_index(off_or_idx) { } + PcRelativePatchInfo(PcRelativePatchInfo&& other) = default; + + // Target dex file or null for .data.bmig.rel.ro patches. + const DexFile* target_dex_file; + // Either the boot image offset (to write to .data.bmig.rel.ro) or string/type/method index. 
+ uint32_t offset_or_index; + vixl::aarch32::Label movw_label; + vixl::aarch32::Label movt_label; + vixl::aarch32::Label add_pc_label; + }; + + PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data); + PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset); + PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method); + PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method); + PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file, dex::TypeIndex type_index); + PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index); + PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file, + dex::StringIndex string_index); + PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file, + dex::StringIndex string_index); + + // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT. + void EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset); + + // Emit the BNE instruction for baker read barrier and record + // the associated patch for AOT or slow path for JIT. 
+ void EmitBakerReadBarrierBne(uint32_t custom_data); + + VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address); + VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, + dex::StringIndex string_index, + Handle handle); + VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file, + dex::TypeIndex type_index, + Handle handle); + + void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference); + void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset); + + void EmitLinkerPatches(ArenaVector* linker_patches) override; + bool NeedsThunkCode(const linker::LinkerPatch& patch) const override; + void EmitThunkCode(const linker::LinkerPatch& patch, + /*out*/ ArenaVector* code, + /*out*/ std::string* debug_name) override; + + void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override; + + // Generate a GC root reference load: + // + // root <- *(obj + offset) + // + // while honoring read barriers based on read_barrier_option. + void GenerateGcRootFieldLoad(HInstruction* instruction, + Location root, + vixl::aarch32::Register obj, + uint32_t offset, + ReadBarrierOption read_barrier_option); + // Generate ADD for UnsafeCASObject to reconstruct the old value from + // `old_value - expected` and mark it with Baker read barrier. + void GenerateUnsafeCasOldValueAddWithBakerReadBarrier(vixl::aarch32::Register old_value, + vixl::aarch32::Register adjusted_old_value, + vixl::aarch32::Register expected); + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference field load when Baker's read barriers are used. + // Overload suitable for Unsafe.getObject/-Volatile() intrinsic. 
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + vixl::aarch32::Register obj, + const vixl::aarch32::MemOperand& src, + bool needs_null_check); + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference field load when Baker's read barriers are used. + void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + vixl::aarch32::Register obj, + uint32_t offset, + Location temp, + bool needs_null_check); + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference array load when Baker's read barriers are used. + void GenerateArrayLoadWithBakerReadBarrier(Location ref, + vixl::aarch32::Register obj, + uint32_t data_offset, + Location index, + Location temp, + bool needs_null_check); + + // Emit code checking the status of the Marking Register, and + // aborting the program if MR does not match the value stored in the + // art::Thread object. Code is only emitted in debug mode and if + // CompilerOptions::EmitRunTimeChecksInDebugMode returns true. + // + // Argument `code` is used to identify the different occurrences of + // MaybeGenerateMarkingRegisterCheck in the code generator, and is + // used together with kMarkingRegisterCheckBreakCodeBaseCode to + // create the value passed to the BKPT instruction. Note that unlike + // in the ARM64 code generator, where `__LINE__` is passed as `code` + // argument to + // CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck, we cannot + // realistically do that here, as Encoding T1 for the BKPT + // instruction only accepts 8-bit immediate values. + // + // If `temp_loc` is a valid location, it is expected to be a + // register and will be used as a temporary to generate code; + // otherwise, a temporary will be fetched from the core register + // scratch pool. 
+ virtual void MaybeGenerateMarkingRegisterCheck(int code, + Location temp_loc = Location::NoLocation()); + + // Generate a read barrier for a heap reference within `instruction` + // using a slow path. + // + // A read barrier for an object reference read from the heap is + // implemented as a call to the artReadBarrierSlow runtime entry + // point, which is passed the values in locations `ref`, `obj`, and + // `offset`: + // + // mirror::Object* artReadBarrierSlow(mirror::Object* ref, + // mirror::Object* obj, + // uint32_t offset); + // + // The `out` location contains the value returned by + // artReadBarrierSlow. + // + // When `index` is provided (i.e. for array accesses), the offset + // value passed to artReadBarrierSlow is adjusted to take `index` + // into account. + void GenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index = Location::NoLocation()); + + // If read barriers are enabled, generate a read barrier for a heap + // reference using a slow path. If heap poisoning is enabled, also + // unpoison the reference in `out`. + void MaybeGenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index = Location::NoLocation()); + + // Generate a read barrier for a GC root within `instruction` using + // a slow path. + // + // A read barrier for an object reference GC root is implemented as + // a call to the artReadBarrierForRootSlow runtime entry point, + // which is passed the value in location `root`: + // + // mirror::Object* artReadBarrierForRootSlow(GcRoot* root); + // + // The `out` location contains the value returned by + // artReadBarrierForRootSlow. 
+ void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root); + + void GenerateNop() override; + + void GenerateImplicitNullCheck(HNullCheck* instruction) override; + void GenerateExplicitNullCheck(HNullCheck* instruction) override; + + JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) { + jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr)); + return jump_tables_.back().get(); + } + void EmitJumpTables(); + + void EmitMovwMovtPlaceholder(CodeGeneratorARMVIXL::PcRelativePatchInfo* labels, + vixl::aarch32::Register out); + + // `temp` is an extra temporary register that is used for some conditions; + // callers may not specify it, in which case the method will use a scratch + // register instead. + void GenerateConditionWithZero(IfCondition condition, + vixl::aarch32::Register out, + vixl::aarch32::Register in, + vixl::aarch32::Register temp = vixl32::Register()); + + void MaybeRecordImplicitNullCheck(HInstruction* instr) final { + // The function must be only be called within special scopes + // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of + // veneer/literal pools by VIXL assembler. + CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true) + << "The function must only be called within EmissionCheckScope or ExactAssemblyScope"; + CodeGenerator::MaybeRecordImplicitNullCheck(instr); + } + + void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl32::Register klass); + void MaybeIncrementHotness(bool is_frame_entry); + + private: + // Encoding of thunk type and data for link-time generated thunks for Baker read barriers. + + enum class BakerReadBarrierKind : uint8_t { + kField, // Field get or array get with constant offset (i.e. constant index). + kArray, // Array get with index in register. + kGcRoot, // GC root load. + kUnsafeCas, // UnsafeCASObject intrinsic. 
+ kLast = kUnsafeCas + }; + + enum class BakerReadBarrierWidth : uint8_t { + kWide, // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled). + kNarrow, // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled). + kLast = kNarrow + }; + + static constexpr uint32_t kBakerReadBarrierInvalidEncodedReg = /* pc is invalid */ 15u; + + static constexpr size_t kBitsForBakerReadBarrierKind = + MinimumBitsToStore(static_cast(BakerReadBarrierKind::kLast)); + static constexpr size_t kBakerReadBarrierBitsForRegister = + MinimumBitsToStore(kBakerReadBarrierInvalidEncodedReg); + using BakerReadBarrierKindField = + BitField; + using BakerReadBarrierFirstRegField = + BitField; + using BakerReadBarrierSecondRegField = + BitField; + static constexpr size_t kBitsForBakerReadBarrierWidth = + MinimumBitsToStore(static_cast(BakerReadBarrierWidth::kLast)); + using BakerReadBarrierWidthField = + BitField; + + static void CheckValidReg(uint32_t reg) { + DCHECK(reg < vixl::aarch32::ip.GetCode() && reg != mr.GetCode()) << reg; + } + + static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg, + uint32_t holder_reg, + bool narrow) { + CheckValidReg(base_reg); + CheckValidReg(holder_reg); + DCHECK(!narrow || base_reg < 8u) << base_reg; + BakerReadBarrierWidth width = + narrow ? 
BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide; + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(holder_reg) | + BakerReadBarrierWidthField::Encode(width); + } + + static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) { + CheckValidReg(base_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) | + BakerReadBarrierFirstRegField::Encode(base_reg) | + BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) | + BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide); + } + + static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) { + CheckValidReg(root_reg); + DCHECK(!narrow || root_reg < 8u) << root_reg; + BakerReadBarrierWidth width = + narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide; + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) | + BakerReadBarrierFirstRegField::Encode(root_reg) | + BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) | + BakerReadBarrierWidthField::Encode(width); + } + + static uint32_t EncodeBakerReadBarrierUnsafeCasData(uint32_t root_reg) { + CheckValidReg(root_reg); + return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kUnsafeCas) | + BakerReadBarrierFirstRegField::Encode(root_reg) | + BakerReadBarrierSecondRegField::Encode(kBakerReadBarrierInvalidEncodedReg) | + BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide); + } + + void CompileBakerReadBarrierThunk(ArmVIXLAssembler& assembler, + uint32_t encoded_data, + /*out*/ std::string* debug_name); + + vixl::aarch32::Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, + vixl::aarch32::Register temp); + + using Uint32ToLiteralMap = ArenaSafeMap; + using StringToLiteralMap = ArenaSafeMap; + using TypeToLiteralMap = ArenaSafeMap; + + struct 
BakerReadBarrierPatchInfo { + explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { } + + vixl::aarch32::Label label; + uint32_t custom_data; + }; + + VIXLUInt32Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map); + PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file, + uint32_t offset_or_index, + ArenaDeque* patches); + template + static void EmitPcRelativeLinkerPatches(const ArenaDeque& infos, + ArenaVector* linker_patches); + + // Labels for each block that will be compiled. + // We use a deque so that the `vixl::aarch32::Label` objects do not move in memory. + ArenaDeque block_labels_; // Indexed by block id. + vixl::aarch32::Label frame_entry_label_; + + ArenaVector> jump_tables_; + LocationsBuilderARMVIXL location_builder_; + InstructionCodeGeneratorARMVIXL instruction_visitor_; + ParallelMoveResolverARMVIXL move_resolver_; + + ArmVIXLAssembler assembler_; + + // PC-relative method patch info for kBootImageLinkTimePcRelative. + ArenaDeque boot_image_method_patches_; + // PC-relative method patch info for kBssEntry. + ArenaDeque method_bss_entry_patches_; + // PC-relative type patch info for kBootImageLinkTimePcRelative. + ArenaDeque boot_image_type_patches_; + // PC-relative type patch info for kBssEntry. + ArenaDeque type_bss_entry_patches_; + // PC-relative String patch info for kBootImageLinkTimePcRelative. + ArenaDeque boot_image_string_patches_; + // PC-relative String patch info for kBssEntry. + ArenaDeque string_bss_entry_patches_; + // PC-relative patch info for IntrinsicObjects for the boot image, + // and for method/type/string patches for kBootImageRelRo otherwise. + ArenaDeque boot_image_other_patches_; + // Patch info for calls to entrypoint dispatch thunks. Used for slow paths. + ArenaDeque> call_entrypoint_patches_; + // Baker read barrier patch info. + ArenaDeque baker_read_barrier_patches_; + + // Deduplication map for 32-bit literals, used for JIT for boot image addresses. 
+ Uint32ToLiteralMap uint32_literals_; + // Patches for string literals in JIT compiled code. + StringToLiteralMap jit_string_patches_; + // Patches for class literals in JIT compiled code. + TypeToLiteralMap jit_class_patches_; + + // Baker read barrier slow paths, mapping custom data (uint32_t) to label. + // Wrap the label to work around vixl::aarch32::Label being non-copyable + // and non-moveable and as such unusable in ArenaSafeMap<>. + struct LabelWrapper { + LabelWrapper(const LabelWrapper& src) + : label() { + DCHECK(!src.label.IsReferenced() && !src.label.IsBound()); + } + LabelWrapper() = default; + vixl::aarch32::Label label; + }; + ArenaSafeMap jit_baker_read_barrier_slow_paths_; + + friend class linker::Thumb2RelativePatcherTest; + DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL); +}; + +} // namespace arm +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_ diff --git a/compiler/optimizing/code_generator_utils.cc b/compiler/optimizing/code_generator_utils.cc new file mode 100644 index 0000000..dd47a1f --- /dev/null +++ b/compiler/optimizing/code_generator_utils.cc @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "code_generator_utils.h" + +#include + +#include "nodes.h" + +namespace art { + +void CalculateMagicAndShiftForDivRem(int64_t divisor, bool is_long, + int64_t* magic, int* shift) { + // It does not make sense to calculate magic and shift for zero divisor. + DCHECK_NE(divisor, 0); + + /* Implementation according to H.S.Warren's "Hacker's Delight" (Addison Wesley, 2002) + * Chapter 10 and T.Grablund, P.L.Montogomery's "Division by Invariant Integers Using + * Multiplication" (PLDI 1994). + * The magic number M and shift S can be calculated in the following way: + * Let nc be the most positive value of numerator(n) such that nc = kd - 1, + * where divisor(d) >= 2. + * Let nc be the most negative value of numerator(n) such that nc = kd + 1, + * where divisor(d) <= -2. + * Thus nc can be calculated like: + * nc = exp + exp % d - 1, where d >= 2 and exp = 2^31 for int or 2^63 for long + * nc = -exp + (exp + 1) % d, where d >= 2 and exp = 2^31 for int or 2^63 for long + * + * So the shift p is the smallest p satisfying + * 2^p > nc * (d - 2^p % d), where d >= 2 + * 2^p > nc * (d + 2^p % d), where d <= -2. + * + * The magic number M is calculated by + * M = (2^p + d - 2^p % d) / d, where d >= 2 + * M = (2^p - d - 2^p % d) / d, where d <= -2. + * + * Notice that p is always bigger than or equal to 32 (resp. 64), so we just return 32 - p + * (resp. 64 - p) as the shift number S. + */ + + int64_t p = is_long ? 63 : 31; + const uint64_t exp = is_long ? (UINT64_C(1) << 63) : (UINT32_C(1) << 31); + + // Initialize the computations. + uint64_t abs_d = (divisor >= 0) ? divisor : -divisor; + uint64_t sign_bit = is_long ? 
static_cast(divisor) >> 63 : + static_cast(divisor) >> 31; + uint64_t tmp = exp + sign_bit; + uint64_t abs_nc = tmp - 1 - (tmp % abs_d); + uint64_t quotient1 = exp / abs_nc; + uint64_t remainder1 = exp % abs_nc; + uint64_t quotient2 = exp / abs_d; + uint64_t remainder2 = exp % abs_d; + + /* + * To avoid handling both positive and negative divisor, "Hacker's Delight" + * introduces a method to handle these 2 cases together to avoid duplication. + */ + uint64_t delta; + do { + p++; + quotient1 = 2 * quotient1; + remainder1 = 2 * remainder1; + if (remainder1 >= abs_nc) { + quotient1++; + remainder1 = remainder1 - abs_nc; + } + quotient2 = 2 * quotient2; + remainder2 = 2 * remainder2; + if (remainder2 >= abs_d) { + quotient2++; + remainder2 = remainder2 - abs_d; + } + delta = abs_d - remainder2; + } while (quotient1 < delta || (quotient1 == delta && remainder1 == 0)); + + *magic = (divisor > 0) ? (quotient2 + 1) : (-quotient2 - 1); + + if (!is_long) { + *magic = static_cast(*magic); + } + + *shift = is_long ? p - 64 : p - 32; +} + +bool IsBooleanValueOrMaterializedCondition(HInstruction* cond_input) { + return !cond_input->IsCondition() || !cond_input->IsEmittedAtUseSite(); +} + +} // namespace art diff --git a/compiler/optimizing/code_generator_utils.h b/compiler/optimizing/code_generator_utils.h new file mode 100644 index 0000000..a6b41c0 --- /dev/null +++ b/compiler/optimizing/code_generator_utils.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_UTILS_H_ +#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_UTILS_H_ + +#include +#include +#include + +namespace art { + +class HInstruction; + +// Computes the magic number and the shift needed in the div/rem by constant algorithm, as out +// arguments `magic` and `shift` +void CalculateMagicAndShiftForDivRem(int64_t divisor, bool is_long, int64_t* magic, int* shift); + +// Returns true if `cond_input` is expected to have a location. Assumes that +// `cond_input` is a conditional input of the currently emitted instruction and +// that it has been previously visited by the InstructionCodeGenerator. +bool IsBooleanValueOrMaterializedCondition(HInstruction* cond_input); + +template T AbsOrMin(T value) { + return (value == std::numeric_limits::min()) + ? value + : std::abs(value); +} + +} // namespace art + +#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_UTILS_H_ diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc new file mode 100644 index 0000000..df95c88 --- /dev/null +++ b/compiler/optimizing/code_generator_vector_arm64.cc @@ -0,0 +1,1523 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "code_generator_arm64.h" + +#include "arch/arm64/instruction_set_features_arm64.h" +#include "mirror/array-inl.h" +#include "mirror/string.h" + +using namespace vixl::aarch64; // NOLINT(build/namespaces) + +namespace art { +namespace arm64 { + +using helpers::ARM64EncodableConstantOrRegister; +using helpers::Arm64CanEncodeConstantAsImmediate; +using helpers::DRegisterFrom; +using helpers::HeapOperand; +using helpers::InputRegisterAt; +using helpers::Int64FromLocation; +using helpers::OutputRegister; +using helpers::VRegisterFrom; +using helpers::WRegisterFrom; +using helpers::XRegisterFrom; + +#define __ GetVIXLAssembler()-> + +// Build-time switch for Armv8.4-a dot product instructions. +// TODO: Enable dot product when there is a device to test it on. +static constexpr bool kArm64EmitDotProdInstructions = false; + +// Returns whether dot product instructions should be emitted. +static bool ShouldEmitDotProductInstructions(const CodeGeneratorARM64* codegen_) { + return kArm64EmitDotProdInstructions && codegen_->GetInstructionSetFeatures().HasDotProd(); +} + +void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + HInstruction* input = instruction->InputAt(0); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, ARM64EncodableConstantOrRegister(input, instruction)); + locations->SetOut(Location::RequiresFpuRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + if (input->IsConstant() && + Arm64CanEncodeConstantAsImmediate(input->AsConstant(), instruction)) { + locations->SetInAt(0, Location::ConstantLocation(input->AsConstant())); + 
locations->SetOut(Location::RequiresFpuRegister()); + } else { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Location src_loc = locations->InAt(0); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + if (src_loc.IsConstant()) { + __ Movi(dst.V16B(), Int64FromLocation(src_loc)); + } else { + __ Dup(dst.V16B(), InputRegisterAt(instruction, 0)); + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + if (src_loc.IsConstant()) { + __ Movi(dst.V8H(), Int64FromLocation(src_loc)); + } else { + __ Dup(dst.V8H(), InputRegisterAt(instruction, 0)); + } + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + if (src_loc.IsConstant()) { + __ Movi(dst.V4S(), Int64FromLocation(src_loc)); + } else { + __ Dup(dst.V4S(), InputRegisterAt(instruction, 0)); + } + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + if (src_loc.IsConstant()) { + __ Movi(dst.V2D(), Int64FromLocation(src_loc)); + } else { + __ Dup(dst.V2D(), XRegisterFrom(src_loc)); + } + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + if (src_loc.IsConstant()) { + __ Fmov(dst.V4S(), src_loc.GetConstant()->AsFloatConstant()->GetValue()); + } else { + __ Dup(dst.V4S(), VRegisterFrom(src_loc).V4S(), 0); + } + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, 
instruction->GetVectorLength()); + if (src_loc.IsConstant()) { + __ Fmov(dst.V2D(), src_loc.GetConstant()->AsDoubleConstant()->GetValue()); + } else { + __ Dup(dst.V2D(), VRegisterFrom(src_loc).V2D(), 0); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorARM64::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister src = VRegisterFrom(locations->InAt(0)); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Umov(OutputRegister(instruction), src.V4S(), 0); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Umov(OutputRegister(instruction), src.V2D(), 0); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 4u); + DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required + break; + default: + LOG(FATAL) << 
"Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector unary operations. +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), + instruction->IsVecNot() ? Location::kOutputOverlap + : Location::kNoOutputOverlap); + break; + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecReduce(HVecReduce* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister src = VRegisterFrom(locations->InAt(0)); + VRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + switch (instruction->GetReductionKind()) { + case HVecReduce::kSum: + __ Addv(dst.S(), src.V4S()); + break; + case HVecReduce::kMin: + __ Sminv(dst.S(), src.V4S()); + break; + case HVecReduce::kMax: + __ Smaxv(dst.S(), src.V4S()); + break; + } + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + switch (instruction->GetReductionKind()) { 
+ case HVecReduce::kSum: + __ Addp(dst.D(), src.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD min/max"; + UNREACHABLE(); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecCnv(HVecCnv* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister src = VRegisterFrom(locations->InAt(0)); + VRegister dst = VRegisterFrom(locations->Out()); + DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Scvtf(dst.V4S(), src.V4S()); + } else { + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + } +} + +void LocationsBuilderARM64::VisitVecNeg(HVecNeg* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister src = VRegisterFrom(locations->InAt(0)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Neg(dst.V16B(), src.V16B()); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Neg(dst.V8H(), src.V8H()); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Neg(dst.V4S(), src.V4S()); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Neg(dst.V2D(), src.V2D()); + break; + case DataType::Type::kFloat32: + 
DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fneg(dst.V4S(), src.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Fneg(dst.V2D(), src.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister src = VRegisterFrom(locations->InAt(0)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Abs(dst.V16B(), src.V16B()); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Abs(dst.V8H(), src.V8H()); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Abs(dst.V4S(), src.V4S()); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Abs(dst.V2D(), src.V2D()); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fabs(dst.V4S(), src.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Fabs(dst.V2D(), src.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister src = VRegisterFrom(locations->InAt(0)); + VRegister dst = VRegisterFrom(locations->Out()); + switch 
(instruction->GetPackedType()) { + case DataType::Type::kBool: // special case boolean-not + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Movi(dst.V16B(), 1); + __ Eor(dst.V16B(), dst.V16B(), src.V16B()); + break; + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + __ Not(dst.V16B(), src.V16B()); // lanes do not matter + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector binary operations. +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecAdd(HVecAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + 
DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Add(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Add(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Add(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Add(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fadd(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Fadd(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Uqadd(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Sqadd(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Uqadd(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Sqadd(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + default: + LOG(FATAL) << 
"Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + instruction->IsRounded() + ? __ Urhadd(dst.V16B(), lhs.V16B(), rhs.V16B()) + : __ Uhadd(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + instruction->IsRounded() + ? __ Srhadd(dst.V16B(), lhs.V16B(), rhs.V16B()) + : __ Shadd(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + instruction->IsRounded() + ? __ Urhadd(dst.V8H(), lhs.V8H(), rhs.V8H()) + : __ Uhadd(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + instruction->IsRounded() + ? 
__ Srhadd(dst.V8H(), lhs.V8H(), rhs.V8H()) + : __ Shadd(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecSub(HVecSub* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Sub(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Sub(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Sub(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Sub(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fsub(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Fsub(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecSaturationSub(HVecSaturationSub* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecSaturationSub(HVecSaturationSub* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = 
VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Uqsub(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Sqsub(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Uqsub(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Sqsub(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecMul(HVecMul* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Mul(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Mul(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Mul(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fmul(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ 
Fmul(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecDiv(HVecDiv* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fdiv(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Fdiv(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecMin(HVecMin* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Umin(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Smin(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Umin(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Smin(dst.V8H(), 
lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kUint32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecMax(HVecMax* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Umax(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Smax(dst.V16B(), lhs.V16B(), rhs.V16B()); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Umax(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Smax(dst.V8H(), lhs.V8H(), rhs.V8H()); + break; + case DataType::Type::kUint32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat32: + 
DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Fmax(dst.V4S(), lhs.V4S(), rhs.V4S()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecAnd(HVecAnd* instruction) { + // TODO: Allow constants supported by BIC (vector, immediate). + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecAnd(HVecAnd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ And(dst.V16B(), lhs.V16B(), rhs.V16B()); // lanes do not matter + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecAndNot(HVecAndNot* instruction) { + LOG(FATAL) << "Unsupported SIMD instruction " << instruction->GetId(); +} + +void InstructionCodeGeneratorARM64::VisitVecAndNot(HVecAndNot* instruction) { + // TODO: Use BIC (vector, register). 
+ LOG(FATAL) << "Unsupported SIMD instruction " << instruction->GetId(); +} + +void LocationsBuilderARM64::VisitVecOr(HVecOr* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Orr(dst.V16B(), lhs.V16B(), rhs.V16B()); // lanes do not matter + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecXor(HVecXor* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister rhs = VRegisterFrom(locations->InAt(1)); + VRegister dst = VRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + __ Eor(dst.V16B(), lhs.V16B(), rhs.V16B()); // lanes do not matter + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector 
shift operations. +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecShl(HVecShl* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister dst = VRegisterFrom(locations->Out()); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Shl(dst.V16B(), lhs.V16B(), value); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Shl(dst.V8H(), lhs.V8H(), value); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Shl(dst.V4S(), lhs.V4S(), value); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Shl(dst.V2D(), lhs.V2D(), value); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void 
LocationsBuilderARM64::VisitVecShr(HVecShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister dst = VRegisterFrom(locations->Out()); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Sshr(dst.V16B(), lhs.V16B(), value); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Sshr(dst.V8H(), lhs.V8H(), value); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Sshr(dst.V4S(), lhs.V4S(), value); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Sshr(dst.V2D(), lhs.V2D(), value); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecUShr(HVecUShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister lhs = VRegisterFrom(locations->InAt(0)); + VRegister dst = VRegisterFrom(locations->Out()); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Ushr(dst.V16B(), lhs.V16B(), value); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Ushr(dst.V8H(), lhs.V8H(), 
value); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Ushr(dst.V4S(), lhs.V4S(), value); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Ushr(dst.V2D(), lhs.V2D(), value); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + HInstruction* input = instruction->InputAt(0); + bool is_zero = IsZeroBitPattern(input); + + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorARM64::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister dst = VRegisterFrom(locations->Out()); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + // Zero out all other elements first. + __ Movi(dst.V16B(), 0); + + // Shorthand for any type of zero. 
+ if (IsZeroBitPattern(instruction->InputAt(0))) { + return; + } + + // Set required elements. + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ Mov(dst.V16B(), 0, InputRegisterAt(instruction, 0)); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Mov(dst.V8H(), 0, InputRegisterAt(instruction, 0)); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Mov(dst.V4S(), 0, InputRegisterAt(instruction, 0)); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Mov(dst.V2D(), 0, InputRegisterAt(instruction, 0)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector accumulations. +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); +} + +// Some early revisions of the Cortex-A53 have an erratum (835769) whereby it is possible for a +// 
64-bit scalar multiply-accumulate instruction in AArch64 state to generate an incorrect result. +// However vector MultiplyAccumulate instruction is not affected. +void InstructionCodeGeneratorARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister acc = VRegisterFrom(locations->InAt(0)); + VRegister left = VRegisterFrom(locations->InAt(1)); + VRegister right = VRegisterFrom(locations->InAt(2)); + + DCHECK(locations->InAt(0).Equals(locations->Out())); + + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + if (instruction->GetOpKind() == HInstruction::kAdd) { + __ Mla(acc.V16B(), left.V16B(), right.V16B()); + } else { + __ Mls(acc.V16B(), left.V16B(), right.V16B()); + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + if (instruction->GetOpKind() == HInstruction::kAdd) { + __ Mla(acc.V8H(), left.V8H(), right.V8H()); + } else { + __ Mls(acc.V8H(), left.V8H(), right.V8H()); + } + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + if (instruction->GetOpKind() == HInstruction::kAdd) { + __ Mla(acc.V4S(), left.V4S(), right.V4S()); + } else { + __ Mls(acc.V4S(), left.V4S(), right.V4S()); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); + // Some conversions require temporary registers. 
+ LocationSummary* locations = instruction->GetLocations(); + HVecOperation* a = instruction->InputAt(1)->AsVecOperation(); + HVecOperation* b = instruction->InputAt(2)->AsVecOperation(); + DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()), + HVecOperation::ToSignedType(b->GetPackedType())); + switch (a->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + switch (instruction->GetPackedType()) { + case DataType::Type::kInt64: + locations->AddTemp(Location::RequiresFpuRegister()); + locations->AddTemp(Location::RequiresFpuRegister()); + FALLTHROUGH_INTENDED; + case DataType::Type::kInt32: + locations->AddTemp(Location::RequiresFpuRegister()); + locations->AddTemp(Location::RequiresFpuRegister()); + break; + default: + break; + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + if (instruction->GetPackedType() == DataType::Type::kInt64) { + locations->AddTemp(Location::RequiresFpuRegister()); + locations->AddTemp(Location::RequiresFpuRegister()); + } + break; + case DataType::Type::kInt32: + case DataType::Type::kInt64: + if (instruction->GetPackedType() == a->GetPackedType()) { + locations->AddTemp(Location::RequiresFpuRegister()); + } + break; + default: + break; + } +} + +void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + LocationSummary* locations = instruction->GetLocations(); + VRegister acc = VRegisterFrom(locations->InAt(0)); + VRegister left = VRegisterFrom(locations->InAt(1)); + VRegister right = VRegisterFrom(locations->InAt(2)); + + DCHECK(locations->InAt(0).Equals(locations->Out())); + + // Handle all feasible acc_T += sad(a_S, b_S) type combinations (T x S). 
+ HVecOperation* a = instruction->InputAt(1)->AsVecOperation(); + HVecOperation* b = instruction->InputAt(2)->AsVecOperation(); + DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()), + HVecOperation::ToSignedType(b->GetPackedType())); + switch (a->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, a->GetVectorLength()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Sabal(acc.V8H(), left.V8B(), right.V8B()); + __ Sabal2(acc.V8H(), left.V16B(), right.V16B()); + break; + case DataType::Type::kInt32: { + DCHECK_EQ(4u, instruction->GetVectorLength()); + VRegister tmp1 = VRegisterFrom(locations->GetTemp(0)); + VRegister tmp2 = VRegisterFrom(locations->GetTemp(1)); + __ Sxtl(tmp1.V8H(), left.V8B()); + __ Sxtl(tmp2.V8H(), right.V8B()); + __ Sabal(acc.V4S(), tmp1.V4H(), tmp2.V4H()); + __ Sabal2(acc.V4S(), tmp1.V8H(), tmp2.V8H()); + __ Sxtl2(tmp1.V8H(), left.V16B()); + __ Sxtl2(tmp2.V8H(), right.V16B()); + __ Sabal(acc.V4S(), tmp1.V4H(), tmp2.V4H()); + __ Sabal2(acc.V4S(), tmp1.V8H(), tmp2.V8H()); + break; + } + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + VRegister tmp1 = VRegisterFrom(locations->GetTemp(0)); + VRegister tmp2 = VRegisterFrom(locations->GetTemp(1)); + VRegister tmp3 = VRegisterFrom(locations->GetTemp(2)); + VRegister tmp4 = VRegisterFrom(locations->GetTemp(3)); + __ Sxtl(tmp1.V8H(), left.V8B()); + __ Sxtl(tmp2.V8H(), right.V8B()); + __ Sxtl(tmp3.V4S(), tmp1.V4H()); + __ Sxtl(tmp4.V4S(), tmp2.V4H()); + __ Sabal(acc.V2D(), tmp3.V2S(), tmp4.V2S()); + __ Sabal2(acc.V2D(), tmp3.V4S(), tmp4.V4S()); + __ Sxtl2(tmp3.V4S(), tmp1.V8H()); + __ Sxtl2(tmp4.V4S(), tmp2.V8H()); + __ Sabal(acc.V2D(), tmp3.V2S(), tmp4.V2S()); + __ Sabal2(acc.V2D(), tmp3.V4S(), tmp4.V4S()); + __ Sxtl2(tmp1.V8H(), left.V16B()); + __ Sxtl2(tmp2.V8H(), right.V16B()); + __ Sxtl(tmp3.V4S(), tmp1.V4H()); + __ Sxtl(tmp4.V4S(), 
tmp2.V4H()); + __ Sabal(acc.V2D(), tmp3.V2S(), tmp4.V2S()); + __ Sabal2(acc.V2D(), tmp3.V4S(), tmp4.V4S()); + __ Sxtl2(tmp3.V4S(), tmp1.V8H()); + __ Sxtl2(tmp4.V4S(), tmp2.V8H()); + __ Sabal(acc.V2D(), tmp3.V2S(), tmp4.V2S()); + __ Sabal2(acc.V2D(), tmp3.V4S(), tmp4.V4S()); + break; + } + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, a->GetVectorLength()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Sabal(acc.V4S(), left.V4H(), right.V4H()); + __ Sabal2(acc.V4S(), left.V8H(), right.V8H()); + break; + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + VRegister tmp1 = VRegisterFrom(locations->GetTemp(0)); + VRegister tmp2 = VRegisterFrom(locations->GetTemp(1)); + __ Sxtl(tmp1.V4S(), left.V4H()); + __ Sxtl(tmp2.V4S(), right.V4H()); + __ Sabal(acc.V2D(), tmp1.V2S(), tmp2.V2S()); + __ Sabal2(acc.V2D(), tmp1.V4S(), tmp2.V4S()); + __ Sxtl2(tmp1.V4S(), left.V8H()); + __ Sxtl2(tmp2.V4S(), right.V8H()); + __ Sabal(acc.V2D(), tmp1.V2S(), tmp2.V2S()); + __ Sabal2(acc.V2D(), tmp1.V4S(), tmp2.V4S()); + break; + } + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, a->GetVectorLength()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: { + DCHECK_EQ(4u, instruction->GetVectorLength()); + VRegister tmp = VRegisterFrom(locations->GetTemp(0)); + __ Sub(tmp.V4S(), left.V4S(), right.V4S()); + __ Abs(tmp.V4S(), tmp.V4S()); + __ Add(acc.V4S(), acc.V4S(), tmp.V4S()); + break; + } + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Sabal(acc.V2D(), left.V2S(), right.V2S()); + __ Sabal2(acc.V2D(), left.V4S(), right.V4S()); + break; + default: + LOG(FATAL) << 
"Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, a->GetVectorLength()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + VRegister tmp = VRegisterFrom(locations->GetTemp(0)); + __ Sub(tmp.V2D(), left.V2D(), right.V2D()); + __ Abs(tmp.V2D(), tmp.V2D()); + __ Add(acc.V2D(), acc.V2D(), tmp.V2D()); + break; + } + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + } +} + +void LocationsBuilderARM64::VisitVecDotProd(HVecDotProd* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + DCHECK(instruction->GetPackedType() == DataType::Type::kInt32); + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + + // For Int8 and Uint8 general case we need a temp register. 
+ if ((DataType::Size(instruction->InputAt(1)->AsVecOperation()->GetPackedType()) == 1) && + !ShouldEmitDotProductInstructions(codegen_)) { + locations->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorARM64::VisitVecDotProd(HVecDotProd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + VRegister acc = VRegisterFrom(locations->InAt(0)); + VRegister left = VRegisterFrom(locations->InAt(1)); + VRegister right = VRegisterFrom(locations->InAt(2)); + HVecOperation* a = instruction->InputAt(1)->AsVecOperation(); + HVecOperation* b = instruction->InputAt(2)->AsVecOperation(); + DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()), + HVecOperation::ToSignedType(b->GetPackedType())); + DCHECK_EQ(instruction->GetPackedType(), DataType::Type::kInt32); + DCHECK_EQ(4u, instruction->GetVectorLength()); + + size_t inputs_data_size = DataType::Size(a->GetPackedType()); + switch (inputs_data_size) { + case 1u: { + DCHECK_EQ(16u, a->GetVectorLength()); + if (instruction->IsZeroExtending()) { + if (ShouldEmitDotProductInstructions(codegen_)) { + __ Udot(acc.V4S(), left.V16B(), right.V16B()); + } else { + VRegister tmp = VRegisterFrom(locations->GetTemp(0)); + __ Umull(tmp.V8H(), left.V8B(), right.V8B()); + __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + + __ Umull2(tmp.V8H(), left.V16B(), right.V16B()); + __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + } + } else { + if (ShouldEmitDotProductInstructions(codegen_)) { + __ Sdot(acc.V4S(), left.V16B(), right.V16B()); + } else { + VRegister tmp = VRegisterFrom(locations->GetTemp(0)); + __ Smull(tmp.V8H(), left.V8B(), right.V8B()); + __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + + __ Smull2(tmp.V8H(), left.V16B(), right.V16B()); + __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H()); + __ 
Saddw2(acc.V4S(), acc.V4S(), tmp.V8H()); + } + } + break; + } + case 2u: + DCHECK_EQ(8u, a->GetVectorLength()); + if (instruction->IsZeroExtending()) { + __ Umlal(acc.V4S(), left.V4H(), right.V4H()); + __ Umlal2(acc.V4S(), left.V8H(), right.V8H()); + } else { + __ Smlal(acc.V4S(), left.V4H(), right.V4H()); + __ Smlal2(acc.V4S(), left.V8H(), right.V8H()); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type size: " << inputs_data_size; + } +} + +// Helper to set up locations for vector memory operations. +static void CreateVecMemLocations(ArenaAllocator* allocator, + HVecMemoryOperation* instruction, + bool is_load) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (is_load) { + locations->SetOut(Location::RequiresFpuRegister()); + } else { + locations->SetInAt(2, Location::RequiresFpuRegister()); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector memory operations. Returns the memory operand and, +// if used, sets the output parameter scratch to a temporary register used in this operand, +// so that the client can release it right after the memory operand use. 
+MemOperand InstructionCodeGeneratorARM64::VecAddress( + HVecMemoryOperation* instruction, + UseScratchRegisterScope* temps_scope, + size_t size, + bool is_string_char_at, + /*out*/ Register* scratch) { + LocationSummary* locations = instruction->GetLocations(); + Register base = InputRegisterAt(instruction, 0); + + if (instruction->InputAt(1)->IsIntermediateAddressIndex()) { + DCHECK(!is_string_char_at); + return MemOperand(base.X(), InputRegisterAt(instruction, 1).X()); + } + + Location index = locations->InAt(1); + uint32_t offset = is_string_char_at + ? mirror::String::ValueOffset().Uint32Value() + : mirror::Array::DataOffset(size).Uint32Value(); + size_t shift = ComponentSizeShiftWidth(size); + + // HIntermediateAddress optimization is only applied for scalar ArrayGet and ArraySet. + DCHECK(!instruction->InputAt(0)->IsIntermediateAddress()); + + if (index.IsConstant()) { + offset += Int64FromLocation(index) << shift; + return HeapOperand(base, offset); + } else { + *scratch = temps_scope->AcquireSameSizeAs(base); + __ Add(*scratch, base, Operand(WRegisterFrom(index), LSL, shift)); + return HeapOperand(*scratch, offset); + } +} + +void LocationsBuilderARM64::VisitVecLoad(HVecLoad* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); +} + +void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) { + LocationSummary* locations = instruction->GetLocations(); + size_t size = DataType::Size(instruction->GetPackedType()); + VRegister reg = VRegisterFrom(locations->Out()); + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register scratch; + + switch (instruction->GetPackedType()) { + case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt. + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + // Special handling of compressed/uncompressed string load. 
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { + vixl::aarch64::Label uncompressed_load, done; + // Test compression bit. + static_assert(static_cast(mirror::StringCompressionFlag::kCompressed) == 0u, + "Expecting 0=compressed, 1=uncompressed"); + uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + Register length = temps.AcquireW(); + __ Ldr(length, HeapOperand(InputRegisterAt(instruction, 0), count_offset)); + __ Tbnz(length.W(), 0, &uncompressed_load); + temps.Release(length); // no longer needed + // Zero extend 8 compressed bytes into 8 chars. + __ Ldr(DRegisterFrom(locations->Out()).V8B(), + VecAddress(instruction, &temps, 1, /*is_string_char_at*/ true, &scratch)); + __ Uxtl(reg.V8H(), reg.V8B()); + __ B(&done); + if (scratch.IsValid()) { + temps.Release(scratch); // if used, no longer needed + } + // Load 8 direct uncompressed chars. + __ Bind(&uncompressed_load); + __ Ldr(reg, VecAddress(instruction, &temps, size, /*is_string_char_at*/ true, &scratch)); + __ Bind(&done); + return; + } + FALLTHROUGH_INTENDED; + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + __ Ldr(reg, VecAddress(instruction, &temps, size, instruction->IsStringCharAt(), &scratch)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARM64::VisitVecStore(HVecStore* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); +} + +void InstructionCodeGeneratorARM64::VisitVecStore(HVecStore* instruction) { + LocationSummary* locations = instruction->GetLocations(); + size_t size = DataType::Size(instruction->GetPackedType()); + VRegister reg = 
VRegisterFrom(locations->InAt(2)); + UseScratchRegisterScope temps(GetVIXLAssembler()); + Register scratch; + + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kFloat32: + case DataType::Type::kInt64: + case DataType::Type::kFloat64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + __ Str(reg, VecAddress(instruction, &temps, size, /*is_string_char_at*/ false, &scratch)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +#undef __ + +} // namespace arm64 +} // namespace art diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc new file mode 100644 index 0000000..b092961 --- /dev/null +++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc @@ -0,0 +1,1054 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "code_generator_arm_vixl.h" +#include "mirror/array-inl.h" + +namespace vixl32 = vixl::aarch32; +using namespace vixl32; // NOLINT(build/namespaces) + +namespace art { +namespace arm { + +using helpers::DRegisterFrom; +using helpers::Int64ConstantFrom; +using helpers::InputDRegisterAt; +using helpers::InputRegisterAt; +using helpers::OutputDRegister; +using helpers::OutputRegister; +using helpers::RegisterFrom; + +#define __ GetVIXLAssembler()-> + +void LocationsBuilderARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vdup(Untyped8, dst, InputRegisterAt(instruction, 0)); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vdup(Untyped16, dst, InputRegisterAt(instruction, 0)); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vdup(Untyped32, dst, InputRegisterAt(instruction, 0)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + 
UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister src = DRegisterFrom(locations->InAt(0)); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vmov(OutputRegister(instruction), DRegisterLane(src, 0)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector unary operations. +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), + instruction->IsVecNot() ? 
Location::kOutputOverlap + : Location::kNoOutputOverlap); + break; + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecReduce(HVecReduce* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister src = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + switch (instruction->GetReductionKind()) { + case HVecReduce::kSum: + __ Vpadd(DataTypeValue::I32, dst, src, src); + break; + case HVecReduce::kMin: + __ Vpmin(DataTypeValue::S32, dst, src, src); + break; + case HVecReduce::kMax: + __ Vpmax(DataTypeValue::S32, dst, src, src); + break; + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecCnv(HVecCnv* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderARMVIXL::VisitVecNeg(HVecNeg* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister src = 
DRegisterFrom(locations->InAt(0)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vneg(DataTypeValue::S8, dst, src); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vneg(DataTypeValue::S16, dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vneg(DataTypeValue::S32, dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister src = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vabs(DataTypeValue::S8, dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vabs(DataTypeValue::S16, dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vabs(DataTypeValue::S32, dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister src = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister dst = 
DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: // special case boolean-not + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vmov(I8, dst, 1); + __ Veor(dst, dst, src); + break; + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Vmvn(I8, dst, src); // lanes do not matter + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector binary operations. +static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecAdd(HVecAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vadd(I8, dst, lhs, rhs); + break; + 
case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vadd(I16, dst, lhs, rhs); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vadd(I32, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vqadd(DataTypeValue::U8, dst, lhs, rhs); + break; + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vqadd(DataTypeValue::S8, dst, lhs, rhs); + break; + case DataType::Type::kUint16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vqadd(DataTypeValue::U16, dst, lhs, rhs); + break; + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vqadd(DataTypeValue::S16, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); 
+ vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + instruction->IsRounded() + ? __ Vrhadd(DataTypeValue::U8, dst, lhs, rhs) + : __ Vhadd(DataTypeValue::U8, dst, lhs, rhs); + break; + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + instruction->IsRounded() + ? __ Vrhadd(DataTypeValue::S8, dst, lhs, rhs) + : __ Vhadd(DataTypeValue::S8, dst, lhs, rhs); + break; + case DataType::Type::kUint16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + instruction->IsRounded() + ? __ Vrhadd(DataTypeValue::U16, dst, lhs, rhs) + : __ Vhadd(DataTypeValue::U16, dst, lhs, rhs); + break; + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + instruction->IsRounded() + ? __ Vrhadd(DataTypeValue::S16, dst, lhs, rhs) + : __ Vhadd(DataTypeValue::S16, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecSub(HVecSub* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vsub(I8, dst, lhs, rhs); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vsub(I16, dst, lhs, rhs); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vsub(I32, dst, lhs, rhs); + break; + default: + 
LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecSaturationSub(HVecSaturationSub* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecSaturationSub(HVecSaturationSub* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vqsub(DataTypeValue::U8, dst, lhs, rhs); + break; + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vqsub(DataTypeValue::S8, dst, lhs, rhs); + break; + case DataType::Type::kUint16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vqsub(DataTypeValue::U16, dst, lhs, rhs); + break; + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vqsub(DataTypeValue::S16, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecMul(HVecMul* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vmul(I8, dst, lhs, rhs); + break; + case DataType::Type::kUint16: + case 
DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vmul(I16, dst, lhs, rhs); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vmul(I32, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecDiv(HVecDiv* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderARMVIXL::VisitVecMin(HVecMin* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vmin(DataTypeValue::U8, dst, lhs, rhs); + break; + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vmin(DataTypeValue::S8, dst, lhs, rhs); + break; + case DataType::Type::kUint16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vmin(DataTypeValue::U16, dst, lhs, rhs); + break; + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vmin(DataTypeValue::S16, dst, lhs, rhs); + break; + case DataType::Type::kUint32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vmin(DataTypeValue::U32, dst, lhs, rhs); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vmin(DataTypeValue::S32, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << 
instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecMax(HVecMax* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vmax(DataTypeValue::U8, dst, lhs, rhs); + break; + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vmax(DataTypeValue::S8, dst, lhs, rhs); + break; + case DataType::Type::kUint16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vmax(DataTypeValue::U16, dst, lhs, rhs); + break; + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vmax(DataTypeValue::S16, dst, lhs, rhs); + break; + case DataType::Type::kUint32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vmax(DataTypeValue::U32, dst, lhs, rhs); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vmax(DataTypeValue::S32, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecAnd(HVecAnd* instruction) { + // TODO: Allow constants supported by VAND (immediate). 
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Vand(I8, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecAndNot(HVecAndNot* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderARMVIXL::VisitVecOr(HVecOr* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Vorr(I8, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecXor(HVecXor* instruction) { + 
CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + __ Veor(I8, dst, lhs, rhs); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector shift operations. +static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); + locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecShl(HVecShl* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + int32_t value = 
locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vshl(I8, dst, lhs, value); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vshl(I16, dst, lhs, value); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vshl(I32, dst, lhs, value); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecShr(HVecShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vshr(DataTypeValue::S8, dst, lhs, value); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vshr(DataTypeValue::S16, dst, lhs, value); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vshr(DataTypeValue::S32, dst, lhs, value); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecUShr(HVecUShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecUShr(HVecUShr* instruction) { + LocationSummary* locations = 
instruction->GetLocations(); + vixl32::DRegister lhs = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ Vshr(DataTypeValue::U8, dst, lhs, value); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ Vshr(DataTypeValue::U16, dst, lhs, value); + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vshr(DataTypeValue::U32, dst, lhs, value); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + HInstruction* input = instruction->InputAt(0); + bool is_zero = IsZeroBitPattern(input); + + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorARMVIXL::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister dst = DRegisterFrom(locations->Out()); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + // Zero out all other elements first. + __ Vmov(I32, dst, 0); + + // Shorthand for any type of zero. 
+ if (IsZeroBitPattern(instruction->InputAt(0))) { + return; + } + + // Set required elements. + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ Vmov(Untyped32, DRegisterLane(dst, 0), InputRegisterAt(instruction, 0)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector accumulations. +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::DRegister acc = DRegisterFrom(locations->InAt(0)); + vixl32::DRegister left = DRegisterFrom(locations->InAt(1)); + 
vixl32::DRegister right = DRegisterFrom(locations->InAt(2)); + + DCHECK(locations->InAt(0).Equals(locations->Out())); + + // Handle all feasible acc_T += sad(a_S, b_S) type combinations (T x S). + HVecOperation* a = instruction->InputAt(1)->AsVecOperation(); + HVecOperation* b = instruction->InputAt(2)->AsVecOperation(); + DCHECK_EQ(a->GetPackedType(), b->GetPackedType()); + switch (a->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(2u, a->GetVectorLength()); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::DRegister tmp = temps.AcquireD(); + __ Vsub(DataTypeValue::I32, tmp, left, right); + __ Vabs(DataTypeValue::S32, tmp, tmp); + __ Vadd(DataTypeValue::I32, acc, acc, tmp); + break; + } + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecDotProd(HVecDotProd* instruction) { + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +// Return whether the vector memory access operation is guaranteed to be word-aligned (ARM word +// size equals to 4). +static bool IsWordAligned(HVecMemoryOperation* instruction) { + return instruction->GetAlignment().IsAlignedAt(4u); +} + +// Helper to set up locations for vector memory operations. 
+static void CreateVecMemLocations(ArenaAllocator* allocator, + HVecMemoryOperation* instruction, + bool is_load) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (is_load) { + locations->SetOut(Location::RequiresFpuRegister()); + } else { + locations->SetInAt(2, Location::RequiresFpuRegister()); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector memory operations. Returns the memory operand and, +// if used, sets the output parameter scratch to a temporary register used in this operand, +// so that the client can release it right after the memory operand use. +MemOperand InstructionCodeGeneratorARMVIXL::VecAddress( + HVecMemoryOperation* instruction, + UseScratchRegisterScope* temps_scope, + /*out*/ vixl32::Register* scratch) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::Register base = InputRegisterAt(instruction, 0); + + Location index = locations->InAt(1); + size_t size = DataType::Size(instruction->GetPackedType()); + uint32_t offset = mirror::Array::DataOffset(size).Uint32Value(); + size_t shift = ComponentSizeShiftWidth(size); + + // HIntermediateAddress optimization is only applied for scalar ArrayGet and ArraySet. 
+ DCHECK(!instruction->InputAt(0)->IsIntermediateAddress()); + + if (index.IsConstant()) { + offset += Int64ConstantFrom(index) << shift; + return MemOperand(base, offset); + } else { + *scratch = temps_scope->Acquire(); + __ Add(*scratch, base, Operand(RegisterFrom(index), ShiftType::LSL, shift)); + + return MemOperand(*scratch, offset); + } +} + +AlignedMemOperand InstructionCodeGeneratorARMVIXL::VecAddressUnaligned( + HVecMemoryOperation* instruction, + UseScratchRegisterScope* temps_scope, + /*out*/ vixl32::Register* scratch) { + LocationSummary* locations = instruction->GetLocations(); + vixl32::Register base = InputRegisterAt(instruction, 0); + + Location index = locations->InAt(1); + size_t size = DataType::Size(instruction->GetPackedType()); + uint32_t offset = mirror::Array::DataOffset(size).Uint32Value(); + size_t shift = ComponentSizeShiftWidth(size); + + // HIntermediateAddress optimization is only applied for scalar ArrayGet and ArraySet. + DCHECK(!instruction->InputAt(0)->IsIntermediateAddress()); + + if (index.IsConstant()) { + offset += Int64ConstantFrom(index) << shift; + __ Add(*scratch, base, offset); + } else { + *scratch = temps_scope->Acquire(); + __ Add(*scratch, base, offset); + __ Add(*scratch, *scratch, Operand(RegisterFrom(index), ShiftType::LSL, shift)); + } + return AlignedMemOperand(*scratch, kNoAlignment); +} + +void LocationsBuilderARMVIXL::VisitVecLoad(HVecLoad* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) { + vixl32::DRegister reg = OutputDRegister(instruction); + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register scratch; + + DCHECK(instruction->GetPackedType() != DataType::Type::kUint16 || !instruction->IsStringCharAt()); + + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, 
instruction->GetVectorLength()); + if (IsWordAligned(instruction)) { + __ Vldr(reg, VecAddress(instruction, &temps, &scratch)); + } else { + __ Vld1(Untyped8, + NeonRegisterList(reg, kMultipleLanes), + VecAddressUnaligned(instruction, &temps, &scratch)); + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + if (IsWordAligned(instruction)) { + __ Vldr(reg, VecAddress(instruction, &temps, &scratch)); + } else { + __ Vld1(Untyped16, + NeonRegisterList(reg, kMultipleLanes), + VecAddressUnaligned(instruction, &temps, &scratch)); + } + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + if (IsWordAligned(instruction)) { + __ Vldr(reg, VecAddress(instruction, &temps, &scratch)); + } else { + __ Vld1(Untyped32, + NeonRegisterList(reg, kMultipleLanes), + VecAddressUnaligned(instruction, &temps, &scratch)); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderARMVIXL::VisitVecStore(HVecStore* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); +} + +void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) { + vixl32::DRegister reg = InputDRegisterAt(instruction, 2); + UseScratchRegisterScope temps(GetVIXLAssembler()); + vixl32::Register scratch; + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(8u, instruction->GetVectorLength()); + if (IsWordAligned(instruction)) { + __ Vstr(reg, VecAddress(instruction, &temps, &scratch)); + } else { + __ Vst1(Untyped8, + NeonRegisterList(reg, kMultipleLanes), + VecAddressUnaligned(instruction, &temps, &scratch)); + } + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(4u, instruction->GetVectorLength()); + if (IsWordAligned(instruction)) { + __ 
Vstr(reg, VecAddress(instruction, &temps, &scratch)); + } else { + __ Vst1(Untyped16, + NeonRegisterList(reg, kMultipleLanes), + VecAddressUnaligned(instruction, &temps, &scratch)); + } + break; + case DataType::Type::kInt32: + DCHECK_EQ(2u, instruction->GetVectorLength()); + if (IsWordAligned(instruction)) { + __ Vstr(reg, VecAddress(instruction, &temps, &scratch)); + } else { + __ Vst1(Untyped32, + NeonRegisterList(reg, kMultipleLanes), + VecAddressUnaligned(instruction, &temps, &scratch)); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +#undef __ + +} // namespace arm +} // namespace art diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc new file mode 100644 index 0000000..1390af2 --- /dev/null +++ b/compiler/optimizing/code_generator_vector_x86.cc @@ -0,0 +1,1387 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "code_generator_x86.h" + +#include "mirror/array-inl.h" +#include "mirror/string.h" + +namespace art { +namespace x86 { + +// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. 
+#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT + +void LocationsBuilderX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + HInstruction* input = instruction->InputAt(0); + bool is_zero = IsZeroBitPattern(input); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt64: + // Long needs extra temporary to load from the register pair. + if (!is_zero) { + locations->AddTemp(Location::RequiresFpuRegister()); + } + FALLTHROUGH_INTENDED; + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresFpuRegister()); + locations->SetOut(is_zero ? Location::RequiresFpuRegister() + : Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>(); + + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + // Shorthand for any type of zero. + if (IsZeroBitPattern(instruction->InputAt(0))) { + cpu_has_avx ?
__ vxorps(dst, dst, dst) : __ xorps(dst, dst); + return; + } + + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister<Register>()); + __ punpcklbw(dst, dst); + __ punpcklwd(dst, dst); + __ pshufd(dst, dst, Immediate(0)); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister<Register>()); + __ punpcklwd(dst, dst); + __ pshufd(dst, dst, Immediate(0)); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister<Register>()); + __ pshufd(dst, dst, Immediate(0)); + break; + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); + __ movd(dst, locations->InAt(0).AsRegisterPairLow<Register>()); + __ movd(tmp, locations->InAt(0).AsRegisterPairHigh<Register>()); + __ punpckldq(dst, tmp); + __ punpcklqdq(dst, dst); + break; + } + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + DCHECK(locations->InAt(0).Equals(locations->Out())); + __ shufps(dst, dst, Immediate(0)); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + DCHECK(locations->InAt(0).Equals(locations->Out())); + __ shufpd(dst, dst, Immediate(0)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt64: + // Long needs extra temporary to store into the register pair.
+ locations->AddTemp(Location::RequiresFpuRegister()); + FALLTHROUGH_INTENDED; + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorX86::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>(); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: extraction of sub-word packed types is not implemented yet.
+ LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + case DataType::Type::kInt32: + DCHECK_LE(4u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + __ movd(locations->Out().AsRegister(), src); + break; + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + __ movd(locations->Out().AsRegisterPairLow(), src); + __ pshufd(tmp, src, Immediate(1)); + __ movd(locations->Out().AsRegisterPairHigh(), tmp); + break; + } + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 4u); + DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector unary operations. +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); + // Long reduction or min/max require a temporary. 
+ if (instruction->GetPackedType() == DataType::Type::kInt64 || + instruction->GetReductionKind() == HVecReduce::kMin || + instruction->GetReductionKind() == HVecReduce::kMax) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86::VisitVecReduce(HVecReduce* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + switch (instruction->GetReductionKind()) { + case HVecReduce::kSum: + __ movaps(dst, src); + __ phaddd(dst, dst); + __ phaddd(dst, dst); + break; + case HVecReduce::kMin: + case HVecReduce::kMax: + // Historical note: We've had a broken implementation here. b/117863065 + // Do not draw on the old code if we ever want to bring MIN/MAX reduction back. + LOG(FATAL) << "Unsupported reduction type."; + } + break; + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + switch (instruction->GetReductionKind()) { + case HVecReduce::kSum: + __ movaps(tmp, src); + __ movaps(dst, src); + __ punpckhqdq(tmp, tmp); + __ paddq(dst, tmp); + break; + case HVecReduce::kMin: + case HVecReduce::kMax: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + } + break; + } + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecCnv(HVecCnv* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + 
DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ cvtdq2ps(dst, src); + } else { + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + } +} + +void LocationsBuilderX86::VisitVecNeg(HVecNeg* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubb(dst, src); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubd(dst, src); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubq(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ xorps(dst, dst); + __ subps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ xorpd(dst, dst); + __ subpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); + // Integral-abs requires a temporary for the comparison. 
+ if (instruction->GetPackedType() == DataType::Type::kInt32) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86::VisitVecAbs(HVecAbs* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: { + DCHECK_EQ(4u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + __ movaps(dst, src); + __ pxor(tmp, tmp); + __ pcmpgtd(tmp, dst); + __ pxor(dst, tmp); + __ psubd(dst, tmp); + break; + } + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ psrld(dst, Immediate(1)); + __ andps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ psrlq(dst, Immediate(1)); + __ andpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); + // Boolean-not requires a temporary to construct the 16 x one. 
+ if (instruction->GetPackedType() == DataType::Type::kBool) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86::VisitVecNot(HVecNot* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: { // special case boolean-not + DCHECK_EQ(16u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + __ pxor(dst, dst); + __ pcmpeqb(tmp, tmp); // all ones + __ psubb(dst, tmp); // 16 x one + __ pxor(dst, src); + break; + } + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + __ pcmpeqb(dst, dst); // all ones + __ pxor(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ xorps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ xorpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector binary operations. 
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +static void CreateVecTerOpLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type"; + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecAdd(HVecAdd* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = 
locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddb(dst, other_src, src) : __ paddb(dst, src); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddw(dst, other_src, src) : __ paddw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddd(dst, other_src, src) : __ paddd(dst, src); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddq(dst, other_src, src) : __ paddq(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vaddps(dst, other_src, src) : __ addps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vaddpd(dst, other_src, src) : __ addpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ paddusb(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ paddsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ paddusw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ paddsw(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + + DCHECK(instruction->IsRounded()); + + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pavgb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ 
pavgw(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecSub(HVecSub* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubb(dst, other_src, src) : __ psubb(dst, src); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubw(dst, other_src, src) : __ psubw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubd(dst, other_src, src) : __ psubd(dst, src); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubq(dst, other_src, src) : __ psubq(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vsubps(dst, other_src, src) : __ subps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vsubpd(dst, other_src, src) : __ subpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecSaturationSub(HVecSaturationSub* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecSaturationSub(HVecSaturationSub* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ psubusb(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ psubsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psubusw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psubsw(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecMul(HVecMul* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: 
+ DCHECK_EQ(8u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpmullw(dst, other_src, src) : __ pmullw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpmulld(dst, other_src, src) : __ pmulld(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vmulps(dst, other_src, src) : __ mulps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vmulpd(dst, other_src, src) : __ mulpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecDiv(HVecDiv* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vdivps(dst, other_src, src) : __ divps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vdivpd(dst, other_src, src) : __ divpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecMin(HVecMin* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pminub(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pminsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pminuw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pminsw(dst, src); + break; + case DataType::Type::kUint32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pminud(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pminsd(dst, src); + break; + // Next cases are sloppy wrt 0.0 vs -0.0. 
+ case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ minps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ minpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecMax(HVecMax* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pmaxub(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pmaxsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pmaxuw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pmaxsw(dst, src); + break; + case DataType::Type::kUint32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pmaxud(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pmaxsd(dst, src); + break; + // Next cases are sloppy wrt 0.0 vs -0.0. 
+ case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ maxps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ maxpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpand(dst, other_src, src) : __ pand(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vandps(dst, other_src, src) : __ andps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vandpd(dst, other_src, src) : __ andpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecAndNot(HVecAndNot* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpandn(dst, other_src, src) : __ pandn(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vandnps(dst, other_src, src) : __ andnps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vandnpd(dst, other_src, src) : __ andnpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecOr(HVecOr* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpor(dst, other_src, src) : __ por(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vorps(dst, other_src, src) : __ orps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vorpd(dst, other_src, src) : __ orpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecXor(HVecXor* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpxor(dst, other_src, src) : __ pxor(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vxorps(dst, other_src, src) : __ xorps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vxorpd(dst, other_src, src) : __ xorpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector shift operations. 
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecShl(HVecShl* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psllw(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pslld(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ psllq(dst, Immediate(static_cast(value))); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecShr(HVecShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + 
DCHECK(locations->InAt(0).Equals(locations->Out())); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psraw(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ psrad(dst, Immediate(static_cast(value))); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecUShr(HVecUShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psrlw(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ psrld(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ psrlq(dst, Immediate(static_cast(value))); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + HInstruction* input = 
instruction->InputAt(0); + bool is_zero = IsZeroBitPattern(input); + + switch (instruction->GetPackedType()) { + case DataType::Type::kInt64: + // Long needs extra temporary to load from register pairs. + if (!is_zero) { + locations->AddTemp(Location::RequiresFpuRegister()); + } + FALLTHROUGH_INTENDED; + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister dst = locations->Out().AsFpuRegister(); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + // Zero out all other elements first. + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst); + + // Shorthand for any type of zero. + if (IsZeroBitPattern(instruction->InputAt(0))) { + return; + } + + // Set required elements. + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: up to here, and? 
+ LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister()); + break; + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + __ xorps(tmp, tmp); + __ movd(dst, locations->InAt(0).AsRegisterPairLow()); + __ movd(tmp, locations->InAt(0).AsRegisterPairHigh()); + __ punpckldq(dst, tmp); + break; + } + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ movss(dst, locations->InAt(1).AsFpuRegister()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ movsd(dst, locations->InAt(1).AsFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector accumulations. 
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + // TODO: pmaddwd? + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + // TODO: psadbw for unsigned? 
+ LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderX86::VisitVecDotProd(HVecDotProd* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + locations->AddTemp(Location::RequiresFpuRegister()); +} + +void InstructionCodeGeneratorX86::VisitVecDotProd(HVecDotProd* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister acc = locations->InAt(0).AsFpuRegister(); + XmmRegister left = locations->InAt(1).AsFpuRegister(); + XmmRegister right = locations->InAt(2).AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: { + DCHECK_EQ(4u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + if (!cpu_has_avx) { + __ movaps(tmp, right); + __ pmaddwd(tmp, left); + __ paddd(acc, tmp); + } else { + __ vpmaddwd(tmp, left, right); + __ vpaddd(acc, acc, tmp); + } + break; + } + default: + LOG(FATAL) << "Unsupported SIMD Type" << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector memory operations. 
+static void CreateVecMemLocations(ArenaAllocator* allocator, + HVecMemoryOperation* instruction, + bool is_load) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (is_load) { + locations->SetOut(Location::RequiresFpuRegister()); + } else { + locations->SetInAt(2, Location::RequiresFpuRegister()); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to construct address for vector memory operations. +static Address VecAddress(LocationSummary* locations, size_t size, bool is_string_char_at) { + Location base = locations->InAt(0); + Location index = locations->InAt(1); + ScaleFactor scale = TIMES_1; + switch (size) { + case 2: scale = TIMES_2; break; + case 4: scale = TIMES_4; break; + case 8: scale = TIMES_8; break; + default: break; + } + // Incorporate the string or array offset in the address computation. + uint32_t offset = is_string_char_at + ? mirror::String::ValueOffset().Uint32Value() + : mirror::Array::DataOffset(size).Uint32Value(); + return CodeGeneratorX86::ArrayAddress(base.AsRegister(), index, scale, offset); +} + +void LocationsBuilderX86::VisitVecLoad(HVecLoad* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); + // String load requires a temporary for the compressed load. 
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86::VisitVecLoad(HVecLoad* instruction) { + LocationSummary* locations = instruction->GetLocations(); + size_t size = DataType::Size(instruction->GetPackedType()); + Address address = VecAddress(locations, size, instruction->IsStringCharAt()); + XmmRegister reg = locations->Out().AsFpuRegister(); + bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt. + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + // Special handling of compressed/uncompressed string load. + if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { + NearLabel done, not_compressed; + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + // Test compression bit. + static_assert(static_cast(mirror::StringCompressionFlag::kCompressed) == 0u, + "Expecting 0=compressed, 1=uncompressed"); + uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + __ testb(Address(locations->InAt(0).AsRegister(), count_offset), Immediate(1)); + __ j(kNotZero, ¬_compressed); + // Zero extend 8 compressed bytes into 8 chars. + __ movsd(reg, VecAddress(locations, 1, instruction->IsStringCharAt())); + __ pxor(tmp, tmp); + __ punpcklbw(reg, tmp); + __ jmp(&done); + // Load 4 direct uncompressed chars. + __ Bind(¬_compressed); + is_aligned16 ? __ movdqa(reg, address) : __ movdqu(reg, address); + __ Bind(&done); + return; + } + FALLTHROUGH_INTENDED; + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + is_aligned16 ? 
__ movdqa(reg, address) : __ movdqu(reg, address); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + is_aligned16 ? __ movaps(reg, address) : __ movups(reg, address); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + is_aligned16 ? __ movapd(reg, address) : __ movupd(reg, address); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86::VisitVecStore(HVecStore* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); +} + +void InstructionCodeGeneratorX86::VisitVecStore(HVecStore* instruction) { + LocationSummary* locations = instruction->GetLocations(); + size_t size = DataType::Size(instruction->GetPackedType()); + Address address = VecAddress(locations, size, /*is_string_char_at*/ false); + XmmRegister reg = locations->InAt(2).AsFpuRegister(); + bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + is_aligned16 ? __ movdqa(address, reg) : __ movdqu(address, reg); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + is_aligned16 ? __ movaps(address, reg) : __ movups(address, reg); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + is_aligned16 ? 
__ movapd(address, reg) : __ movupd(address, reg); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +#undef __ + +} // namespace x86 +} // namespace art diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc new file mode 100644 index 0000000..7fac44d --- /dev/null +++ b/compiler/optimizing/code_generator_vector_x86_64.cc @@ -0,0 +1,1360 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "code_generator_x86_64.h" + +#include "mirror/array-inl.h" +#include "mirror/string.h" + +namespace art { +namespace x86_64 { + +// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. +#define __ down_cast(GetAssembler())-> // NOLINT + +void LocationsBuilderX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + HInstruction* input = instruction->InputAt(0); + bool is_zero = IsZeroBitPattern(input); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, is_zero ? 
Location::ConstantLocation(input->AsConstant()) + : Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresFpuRegister()); + locations->SetOut(is_zero ? Location::RequiresFpuRegister() + : Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister dst = locations->Out().AsFpuRegister(); + + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + // Shorthand for any type of zero. + if (IsZeroBitPattern(instruction->InputAt(0))) { + cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst); + return; + } + + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister(), /*64-bit*/ false); + __ punpcklbw(dst, dst); + __ punpcklwd(dst, dst); + __ pshufd(dst, dst, Immediate(0)); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister(), /*64-bit*/ false); + __ punpcklwd(dst, dst); + __ pshufd(dst, dst, Immediate(0)); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister(), /*64-bit*/ false); + __ pshufd(dst, dst, Immediate(0)); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister(), /*64-bit*/ true); + __ punpcklqdq(dst, dst); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, 
instruction->GetVectorLength()); + DCHECK(locations->InAt(0).Equals(locations->Out())); + __ shufps(dst, dst, Immediate(0)); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + DCHECK(locations->InAt(0).Equals(locations->Out())); + __ shufpd(dst, dst, Immediate(0)); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecExtractScalar(HVecExtractScalar* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: up to here, and? 
+ LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ movd(locations->Out().AsRegister(), src, /*64-bit*/ false); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ movd(locations->Out().AsRegister(), src, /*64-bit*/ true); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 4u); + DCHECK(locations->InAt(0).Equals(locations->Out())); // no code required + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector unary operations. +static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); + // Long reduction or min/max require a temporary. 
+ if (instruction->GetPackedType() == DataType::Type::kInt64 || + instruction->GetReductionKind() == HVecReduce::kMin || + instruction->GetReductionKind() == HVecReduce::kMax) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecReduce(HVecReduce* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + switch (instruction->GetReductionKind()) { + case HVecReduce::kSum: + __ movaps(dst, src); + __ phaddd(dst, dst); + __ phaddd(dst, dst); + break; + case HVecReduce::kMin: + case HVecReduce::kMax: + // Historical note: We've had a broken implementation here. b/117863065 + // Do not draw on the old code if we ever want to bring MIN/MAX reduction back. + LOG(FATAL) << "Unsupported reduction type."; + } + break; + case DataType::Type::kInt64: { + DCHECK_EQ(2u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + switch (instruction->GetReductionKind()) { + case HVecReduce::kSum: + __ movaps(tmp, src); + __ movaps(dst, src); + __ punpckhqdq(tmp, tmp); + __ paddq(dst, tmp); + break; + case HVecReduce::kMin: + case HVecReduce::kMax: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + } + break; + } + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecCnv(HVecCnv* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + 
DataType::Type from = instruction->GetInputType(); + DataType::Type to = instruction->GetResultType(); + if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) { + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ cvtdq2ps(dst, src); + } else { + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + } +} + +void LocationsBuilderX86_64::VisitVecNeg(HVecNeg* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubb(dst, src); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubd(dst, src); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ pxor(dst, dst); + __ psubq(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ xorps(dst, dst); + __ subps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ xorpd(dst, dst); + __ subpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); + // Integral-abs requires a temporary for the comparison. 
+ if (instruction->GetPackedType() == DataType::Type::kInt32) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecAbs(HVecAbs* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: { + DCHECK_EQ(4u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + __ movaps(dst, src); + __ pxor(tmp, tmp); + __ pcmpgtd(tmp, dst); + __ pxor(dst, tmp); + __ psubd(dst, tmp); + break; + } + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ psrld(dst, Immediate(1)); + __ andps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ psrlq(dst, Immediate(1)); + __ andpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) { + CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction); + // Boolean-not requires a temporary to construct the 16 x one. 
+ if (instruction->GetPackedType() == DataType::Type::kBool) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecNot(HVecNot* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: { // special case boolean-not + DCHECK_EQ(16u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + __ pxor(dst, dst); + __ pcmpeqb(tmp, tmp); // all ones + __ psubb(dst, tmp); // 16 x one + __ pxor(dst, src); + break; + } + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + __ pcmpeqb(dst, dst); // all ones + __ pxor(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ xorps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ pcmpeqb(dst, dst); // all ones + __ xorpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector binary operations. 
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +static void CreateVecTerOpLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type"; + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecAdd(HVecAdd* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = 
locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddb(dst, other_src, src) : __ paddb(dst, src); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddw(dst, other_src, src) : __ paddw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddd(dst, other_src, src) : __ paddd(dst, src); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpaddq(dst, other_src, src) : __ paddq(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vaddps(dst, other_src, src) : __ addps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vaddpd(dst, other_src, src) : __ addpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecSaturationAdd(HVecSaturationAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ paddusb(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ paddsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ paddusw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ paddsw(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + + DCHECK(instruction->IsRounded()); + + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pavgb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); 
+ __ pavgw(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecSub(HVecSub* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubb(dst, other_src, src) : __ psubb(dst, src); + break; + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubw(dst, other_src, src) : __ psubw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubd(dst, other_src, src) : __ psubd(dst, src); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpsubq(dst, other_src, src) : __ psubq(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vsubps(dst, other_src, src) : __ subps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vsubpd(dst, other_src, src) : __ subpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecSaturationSub(HVecSaturationSub* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecSaturationSub(HVecSaturationSub* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ psubusb(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ psubsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psubusw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psubsw(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecMul(HVecMul* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case 
DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpmullw(dst, other_src, src) : __ pmullw(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vpmulld(dst, other_src, src): __ pmulld(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vmulps(dst, other_src, src) : __ mulps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vmulpd(dst, other_src, src) : __ mulpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecDiv(HVecDiv* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vdivps(dst, other_src, src) : __ divps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vdivpd(dst, other_src, src) : __ divpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecMin(HVecMin* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pminub(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pminsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pminuw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pminsw(dst, src); + break; + case DataType::Type::kUint32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pminud(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pminsd(dst, src); + break; + // Next cases are sloppy wrt 0.0 vs -0.0. 
+ case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ minps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ minpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecMax(HVecMax* instruction) { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pmaxub(dst, src); + break; + case DataType::Type::kInt8: + DCHECK_EQ(16u, instruction->GetVectorLength()); + __ pmaxsb(dst, src); + break; + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pmaxuw(dst, src); + break; + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ pmaxsw(dst, src); + break; + case DataType::Type::kUint32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pmaxud(dst, src); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pmaxsd(dst, src); + break; + // Next cases are sloppy wrt 0.0 vs -0.0. 
+ case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ maxps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ maxpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpand(dst, other_src, src) : __ pand(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vandps(dst, other_src, src) : __ andps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vandpd(dst, other_src, src) : __ andpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecAndNot(HVecAndNot* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpandn(dst, other_src, src) : __ pandn(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vandnps(dst, other_src, src) : __ andnps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vandnpd(dst, other_src, src) : __ andnpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecOr(HVecOr* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpor(dst, other_src, src) : __ por(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vorps(dst, other_src, src) : __ orps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? 
__ vorpd(dst, other_src, src) : __ orpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecXor(HVecXor* instruction) { + if (CpuHasAvxFeatureFlag()) { + CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction); + } else { + CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister other_src = locations->InAt(0).AsFpuRegister(); + XmmRegister src = locations->InAt(1).AsFpuRegister(); + XmmRegister dst = locations->Out().AsFpuRegister(); + DCHECK(cpu_has_avx || other_src == dst); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + cpu_has_avx ? __ vpxor(dst, other_src, src) : __ pxor(dst, src); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + cpu_has_avx ? __ vxorps(dst, other_src, src) : __ xorps(dst, src); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + cpu_has_avx ? __ vxorpd(dst, other_src, src) : __ xorpd(dst, src); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector shift operations. 
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant())); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecShl(HVecShl* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psllw(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ pslld(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ psllq(dst, Immediate(static_cast(value))); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecShr(HVecShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) { + LocationSummary* locations = instruction->GetLocations(); 
+ DCHECK(locations->InAt(0).Equals(locations->Out())); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psraw(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ psrad(dst, Immediate(static_cast(value))); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecUShr(HVecUShr* instruction) { + CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) { + LocationSummary* locations = instruction->GetLocations(); + DCHECK(locations->InAt(0).Equals(locations->Out())); + int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); + XmmRegister dst = locations->Out().AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint16: + case DataType::Type::kInt16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + __ psrlw(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ psrld(dst, Immediate(static_cast(value))); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ psrlq(dst, Immediate(static_cast(value))); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + HInstruction* input = 
instruction->InputAt(0); + bool is_zero = IsZeroBitPattern(input); + + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant()) + : Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecSetScalars(HVecSetScalars* instruction) { + LocationSummary* locations = instruction->GetLocations(); + XmmRegister dst = locations->Out().AsFpuRegister(); + + DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented + + // Zero out all other elements first. + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst); + + // Shorthand for any type of zero. + if (IsZeroBitPattern(instruction->InputAt(0))) { + return; + } + + // Set required elements. + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: // TODO: up to here, and? 
+ LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + case DataType::Type::kInt32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister()); + break; + case DataType::Type::kInt64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ movd(dst, locations->InAt(0).AsRegister()); // is 64-bit + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + __ movss(dst, locations->InAt(0).AsFpuRegister()); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + __ movsd(dst, locations->InAt(0).AsFpuRegister()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector accumulations. +static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) { + // TODO: pmaddwd? 
+ LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction); +} + +void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) { + // TODO: psadbw for unsigned? + LOG(FATAL) << "No SIMD for " << instruction->GetId(); +} + +void LocationsBuilderX86_64::VisitVecDotProd(HVecDotProd* instruction) { + LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction); + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetOut(Location::SameAsFirstInput()); + locations->AddTemp(Location::RequiresFpuRegister()); +} + +void InstructionCodeGeneratorX86_64::VisitVecDotProd(HVecDotProd* instruction) { + bool cpu_has_avx = CpuHasAvxFeatureFlag(); + LocationSummary* locations = instruction->GetLocations(); + XmmRegister acc = locations->InAt(0).AsFpuRegister(); + XmmRegister left = locations->InAt(1).AsFpuRegister(); + XmmRegister right = locations->InAt(2).AsFpuRegister(); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt32: { + DCHECK_EQ(4u, instruction->GetVectorLength()); + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + if (!cpu_has_avx) { + __ movaps(tmp, right); + __ pmaddwd(tmp, left); + __ paddd(acc, tmp); + } else { + __ vpmaddwd(tmp, left, right); + __ vpaddd(acc, acc, tmp); + } + break; + } + default: + LOG(FATAL) << "Unsupported SIMD Type" << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to set up locations for vector memory operations. 
+static void CreateVecMemLocations(ArenaAllocator* allocator, + HVecMemoryOperation* instruction, + bool is_load) { + LocationSummary* locations = new (allocator) LocationSummary(instruction); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + case DataType::Type::kFloat32: + case DataType::Type::kFloat64: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + if (is_load) { + locations->SetOut(Location::RequiresFpuRegister()); + } else { + locations->SetInAt(2, Location::RequiresFpuRegister()); + } + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +// Helper to construct address for vector memory operations. +static Address VecAddress(LocationSummary* locations, size_t size, bool is_string_char_at) { + Location base = locations->InAt(0); + Location index = locations->InAt(1); + ScaleFactor scale = TIMES_1; + switch (size) { + case 2: scale = TIMES_2; break; + case 4: scale = TIMES_4; break; + case 8: scale = TIMES_8; break; + default: break; + } + // Incorporate the string or array offset in the address computation. + uint32_t offset = is_string_char_at + ? mirror::String::ValueOffset().Uint32Value() + : mirror::Array::DataOffset(size).Uint32Value(); + return CodeGeneratorX86_64::ArrayAddress(base.AsRegister(), index, scale, offset); +} + +void LocationsBuilderX86_64::VisitVecLoad(HVecLoad* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true); + // String load requires a temporary for the compressed load. 
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { + instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister()); + } +} + +void InstructionCodeGeneratorX86_64::VisitVecLoad(HVecLoad* instruction) { + LocationSummary* locations = instruction->GetLocations(); + size_t size = DataType::Size(instruction->GetPackedType()); + Address address = VecAddress(locations, size, instruction->IsStringCharAt()); + XmmRegister reg = locations->Out().AsFpuRegister(); + bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); + switch (instruction->GetPackedType()) { + case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt. + case DataType::Type::kUint16: + DCHECK_EQ(8u, instruction->GetVectorLength()); + // Special handling of compressed/uncompressed string load. + if (mirror::kUseStringCompression && instruction->IsStringCharAt()) { + NearLabel done, not_compressed; + XmmRegister tmp = locations->GetTemp(0).AsFpuRegister(); + // Test compression bit. + static_assert(static_cast(mirror::StringCompressionFlag::kCompressed) == 0u, + "Expecting 0=compressed, 1=uncompressed"); + uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + __ testb(Address(locations->InAt(0).AsRegister(), count_offset), Immediate(1)); + __ j(kNotZero, ¬_compressed); + // Zero extend 8 compressed bytes into 8 chars. + __ movsd(reg, VecAddress(locations, 1, instruction->IsStringCharAt())); + __ pxor(tmp, tmp); + __ punpcklbw(reg, tmp); + __ jmp(&done); + // Load 8 direct uncompressed chars. + __ Bind(¬_compressed); + is_aligned16 ? __ movdqa(reg, address) : __ movdqu(reg, address); + __ Bind(&done); + return; + } + FALLTHROUGH_INTENDED; + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + is_aligned16 ? 
__ movdqa(reg, address) : __ movdqu(reg, address); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + is_aligned16 ? __ movaps(reg, address) : __ movups(reg, address); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + is_aligned16 ? __ movapd(reg, address) : __ movupd(reg, address); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +void LocationsBuilderX86_64::VisitVecStore(HVecStore* instruction) { + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false); +} + +void InstructionCodeGeneratorX86_64::VisitVecStore(HVecStore* instruction) { + LocationSummary* locations = instruction->GetLocations(); + size_t size = DataType::Size(instruction->GetPackedType()); + Address address = VecAddress(locations, size, /*is_string_char_at*/ false); + XmmRegister reg = locations->InAt(2).AsFpuRegister(); + bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16); + switch (instruction->GetPackedType()) { + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: + case DataType::Type::kInt64: + DCHECK_LE(2u, instruction->GetVectorLength()); + DCHECK_LE(instruction->GetVectorLength(), 16u); + is_aligned16 ? __ movdqa(address, reg) : __ movdqu(address, reg); + break; + case DataType::Type::kFloat32: + DCHECK_EQ(4u, instruction->GetVectorLength()); + is_aligned16 ? __ movaps(address, reg) : __ movups(address, reg); + break; + case DataType::Type::kFloat64: + DCHECK_EQ(2u, instruction->GetVectorLength()); + is_aligned16 ? 
__ movapd(address, reg) : __ movupd(address, reg); + break; + default: + LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); + UNREACHABLE(); + } +} + +#undef __ + +} // namespace x86_64 +} // namespace art diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc new file mode 100644 index 0000000..ed1a536 --- /dev/null +++ b/compiler/optimizing/code_generator_x86.cc @@ -0,0 +1,8568 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "code_generator_x86.h" + +#include "art_method-inl.h" +#include "class_table.h" +#include "code_generator_utils.h" +#include "compiled_method.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints_enum.h" +#include "gc/accounting/card_table.h" +#include "gc/space/image_space.h" +#include "heap_poisoning.h" +#include "intrinsics.h" +#include "intrinsics_x86.h" +#include "jit/profiling_info.h" +#include "linker/linker_patch.h" +#include "lock_word.h" +#include "mirror/array-inl.h" +#include "mirror/class-inl.h" +#include "scoped_thread_state_change-inl.h" +#include "thread.h" +#include "utils/assembler.h" +#include "utils/stack_checks.h" +#include "utils/x86/assembler_x86.h" +#include "utils/x86/managed_register_x86.h" + +namespace art { + +template +class GcRoot; + +namespace x86 { + +static constexpr int kCurrentMethodStackOffset = 0; +static constexpr Register kMethodRegisterArgument = EAX; +static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI }; + +static constexpr int kC2ConditionMask = 0x400; + +static constexpr int kFakeReturnRegister = Register(8); + +static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000); +static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000); + +static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() { + InvokeRuntimeCallingConvention calling_convention; + RegisterSet caller_saves = RegisterSet::Empty(); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK() + // that the the kPrimNot result register is the same as the first argument register. + return caller_saves; +} + +// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. 
+#define __ down_cast(codegen->GetAssembler())-> // NOLINT +#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value() + +class NullCheckSlowPathX86 : public SlowPathCode { + public: + explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + if (instruction_->CanThrowIntoCatchBlock()) { + // Live registers will be restored in the catch block if caught. + SaveLiveRegisters(codegen, instruction_->GetLocations()); + } + x86_codegen->InvokeRuntime(kQuickThrowNullPointer, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "NullCheckSlowPathX86"; } + + private: + DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86); +}; + +class DivZeroCheckSlowPathX86 : public SlowPathCode { + public: + explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "DivZeroCheckSlowPathX86"; } + + private: + DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86); +}; + +class DivRemMinusOneSlowPathX86 : public SlowPathCode { + public: + DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div) + : SlowPathCode(instruction), reg_(reg), is_div_(is_div) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + __ Bind(GetEntryLabel()); + if (is_div_) { + __ negl(reg_); + } else { + __ movl(reg_, Immediate(0)); + } + __ jmp(GetExitLabel()); + } 
+ + const char* GetDescription() const override { return "DivRemMinusOneSlowPathX86"; } + + private: + Register reg_; + bool is_div_; + DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86); +}; + +class BoundsCheckSlowPathX86 : public SlowPathCode { + public: + explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. + if (instruction_->CanThrowIntoCatchBlock()) { + // Live registers will be restored in the catch block if caught. + SaveLiveRegisters(codegen, instruction_->GetLocations()); + } + + // Are we using an array length from memory? + HInstruction* array_length = instruction_->InputAt(1); + Location length_loc = locations->InAt(1); + InvokeRuntimeCallingConvention calling_convention; + if (array_length->IsArrayLength() && array_length->IsEmittedAtUseSite()) { + // Load the array length into our temporary. + HArrayLength* length = array_length->AsArrayLength(); + uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(length); + Location array_loc = array_length->GetLocations()->InAt(0); + Address array_len(array_loc.AsRegister(), len_offset); + length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(1)); + // Check for conflicts with index. + if (length_loc.Equals(locations->InAt(0))) { + // We know we aren't using parameter 2. 
+ length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(2)); + } + __ movl(length_loc.AsRegister(), array_len); + if (mirror::kUseStringCompression && length->IsStringLength()) { + __ shrl(length_loc.AsRegister(), Immediate(1)); + } + } + x86_codegen->EmitParallelMoves( + locations->InAt(0), + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + DataType::Type::kInt32, + length_loc, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + DataType::Type::kInt32); + QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt() + ? kQuickThrowStringBounds + : kQuickThrowArrayBounds; + x86_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + CheckEntrypointTypes(); + } + + bool IsFatal() const override { return true; } + + const char* GetDescription() const override { return "BoundsCheckSlowPathX86"; } + + private: + DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86); +}; + +class SuspendCheckSlowPathX86 : public SlowPathCode { + public: + SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor) + : SlowPathCode(instruction), successor_(successor) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD. + x86_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD. 
+ if (successor_ == nullptr) { + __ jmp(GetReturnLabel()); + } else { + __ jmp(x86_codegen->GetLabelOf(successor_)); + } + } + + Label* GetReturnLabel() { + DCHECK(successor_ == nullptr); + return &return_label_; + } + + HBasicBlock* GetSuccessor() const { + return successor_; + } + + const char* GetDescription() const override { return "SuspendCheckSlowPathX86"; } + + private: + HBasicBlock* const successor_; + Label return_label_; + + DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86); +}; + +class LoadStringSlowPathX86 : public SlowPathCode { + public: + explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex(); + __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index.index_)); + x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); + RestoreLiveRegisters(codegen, locations); + + __ jmp(GetExitLabel()); + } + + const char* GetDescription() const override { return "LoadStringSlowPathX86"; } + + private: + DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86); +}; + +class LoadClassSlowPathX86 : public SlowPathCode { + public: + LoadClassSlowPathX86(HLoadClass* cls, HInstruction* at) + : SlowPathCode(at), cls_(cls) { + DCHECK(at->IsLoadClass() || at->IsClinitCheck()); + DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_); + } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = 
instruction_->GetLocations(); + Location out = locations->Out(); + const uint32_t dex_pc = instruction_->GetDexPc(); + bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath(); + bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck(); + + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + if (must_resolve_type) { + DCHECK(IsSameDexFile(cls_->GetDexFile(), x86_codegen->GetGraph()->GetDexFile())); + dex::TypeIndex type_index = cls_->GetTypeIndex(); + __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_)); + x86_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this); + CheckEntrypointTypes(); + // If we also must_do_clinit, the resolved type is now in the correct register. + } else { + DCHECK(must_do_clinit); + Location source = instruction_->IsLoadClass() ? out : locations->InAt(0); + x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), source); + } + if (must_do_clinit) { + x86_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this); + CheckEntrypointTypes(); + } + + // Move the class to the desired location. + if (out.IsValid()) { + DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); + x86_codegen->Move32(out, Location::RegisterLocation(EAX)); + } + RestoreLiveRegisters(codegen, locations); + __ jmp(GetExitLabel()); + } + + const char* GetDescription() const override { return "LoadClassSlowPathX86"; } + + private: + // The class this slow path will load. 
+ HLoadClass* const cls_; + + DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86); +}; + +class TypeCheckSlowPathX86 : public SlowPathCode { + public: + TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal) + : SlowPathCode(instruction), is_fatal_(is_fatal) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(instruction_->IsCheckCast() + || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + + if (kPoisonHeapReferences && + instruction_->IsCheckCast() && + instruction_->AsCheckCast()->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) { + // First, unpoison the `cls` reference that was poisoned for direct memory comparison. + __ UnpoisonHeapReference(locations->InAt(1).AsRegister()); + } + + if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) { + SaveLiveRegisters(codegen, locations); + } + + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. 
+ InvokeRuntimeCallingConvention calling_convention; + x86_codegen->EmitParallelMoves(locations->InAt(0), + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + locations->InAt(1), + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + DataType::Type::kReference); + if (instruction_->IsInstanceOf()) { + x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes(); + } else { + DCHECK(instruction_->IsCheckCast()); + x86_codegen->InvokeRuntime(kQuickCheckInstanceOf, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes(); + } + + if (!is_fatal_) { + if (instruction_->IsInstanceOf()) { + x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); + } + RestoreLiveRegisters(codegen, locations); + + __ jmp(GetExitLabel()); + } + } + + const char* GetDescription() const override { return "TypeCheckSlowPathX86"; } + bool IsFatal() const override { return is_fatal_; } + + private: + const bool is_fatal_; + + DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86); +}; + +class DeoptimizationSlowPathX86 : public SlowPathCode { + public: + explicit DeoptimizationSlowPathX86(HDeoptimize* instruction) + : SlowPathCode(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorX86* x86_codegen = down_cast(codegen); + __ Bind(GetEntryLabel()); + LocationSummary* locations = instruction_->GetLocations(); + SaveLiveRegisters(codegen, locations); + InvokeRuntimeCallingConvention calling_convention; + x86_codegen->Load32BitValue( + calling_convention.GetRegisterAt(0), + static_cast(instruction_->AsDeoptimize()->GetDeoptimizationKind())); + x86_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + } + + const char* GetDescription() const override { return "DeoptimizationSlowPathX86"; } + + private: + 
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86); +}; + +class ArraySetSlowPathX86 : public SlowPathCode { + public: + explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {} + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); + parallel_move.AddMove( + locations->InAt(0), + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + nullptr); + parallel_move.AddMove( + locations->InAt(1), + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + DataType::Type::kInt32, + nullptr); + parallel_move.AddMove( + locations->InAt(2), + Location::RegisterLocation(calling_convention.GetRegisterAt(2)), + DataType::Type::kReference, + nullptr); + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + + CodeGeneratorX86* x86_codegen = down_cast(codegen); + x86_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes(); + RestoreLiveRegisters(codegen, locations); + __ jmp(GetExitLabel()); + } + + const char* GetDescription() const override { return "ArraySetSlowPathX86"; } + + private: + DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86); +}; + +// Slow path marking an object reference `ref` during a read +// barrier. The field `obj.field` in the object `obj` holding this +// reference does not get updated by this slow path after marking (see +// ReadBarrierMarkAndUpdateFieldSlowPathX86 below for that). 
+// +// This means that after the execution of this slow path, `ref` will +// always be up-to-date, but `obj.field` may not; i.e., after the +// flip, `ref` will be a to-space reference, but `obj.field` will +// probably still be a from-space reference (unless it gets updated by +// another thread, or if another thread installed another object +// reference (different from `ref`) in `obj.field`). +class ReadBarrierMarkSlowPathX86 : public SlowPathCode { + public: + ReadBarrierMarkSlowPathX86(HInstruction* instruction, + Location ref, + bool unpoison_ref_before_marking) + : SlowPathCode(instruction), + ref_(ref), + unpoison_ref_before_marking_(unpoison_ref_before_marking) { + DCHECK(kEmitCompilerReadBarrier); + } + + const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + Register ref_reg = ref_.AsRegister(); + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg; + DCHECK(instruction_->IsInstanceFieldGet() || + instruction_->IsStaticFieldGet() || + instruction_->IsArrayGet() || + instruction_->IsArraySet() || + instruction_->IsLoadClass() || + instruction_->IsLoadString() || + instruction_->IsInstanceOf() || + instruction_->IsCheckCast() || + (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) || + (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified())) + << "Unexpected instruction in read barrier marking slow path: " + << instruction_->DebugName(); + + __ Bind(GetEntryLabel()); + if (unpoison_ref_before_marking_) { + // Object* ref = ref_addr->AsMirrorPtr() + __ MaybeUnpoisonHeapReference(ref_reg); + } + // No need to save live registers; it's taken care of by the + // entrypoint. Also, there is no need to update the stack mask, + // as this runtime call will not trigger a garbage collection. 
+ CodeGeneratorX86* x86_codegen = down_cast(codegen); + DCHECK_NE(ref_reg, ESP); + DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg; + // "Compact" slow path, saving two moves. + // + // Instead of using the standard runtime calling convention (input + // and output in EAX): + // + // EAX <- ref + // EAX <- ReadBarrierMark(EAX) + // ref <- EAX + // + // we just use rX (the register containing `ref`) as input and output + // of a dedicated entrypoint: + // + // rX <- ReadBarrierMarkRegX(rX) + // + int32_t entry_point_offset = Thread::ReadBarrierMarkEntryPointsOffset(ref_reg); + // This runtime call does not require a stack map. + x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this); + __ jmp(GetExitLabel()); + } + + private: + // The location (register) of the marked object reference. + const Location ref_; + // Should the reference in `ref_` be unpoisoned prior to marking it? + const bool unpoison_ref_before_marking_; + + DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86); +}; + +// Slow path marking an object reference `ref` during a read barrier, +// and if needed, atomically updating the field `obj.field` in the +// object `obj` holding this reference after marking (contrary to +// ReadBarrierMarkSlowPathX86 above, which never tries to update +// `obj.field`). +// +// This means that after the execution of this slow path, both `ref` +// and `obj.field` will be up-to-date; i.e., after the flip, both will +// hold the same to-space reference (unless another thread installed +// another object reference (different from `ref`) in `obj.field`). 
+class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode { + public: + ReadBarrierMarkAndUpdateFieldSlowPathX86(HInstruction* instruction, + Location ref, + Register obj, + const Address& field_addr, + bool unpoison_ref_before_marking, + Register temp) + : SlowPathCode(instruction), + ref_(ref), + obj_(obj), + field_addr_(field_addr), + unpoison_ref_before_marking_(unpoison_ref_before_marking), + temp_(temp) { + DCHECK(kEmitCompilerReadBarrier); + } + + const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + Register ref_reg = ref_.AsRegister(); + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg; + // This slow path is only used by the UnsafeCASObject intrinsic. + DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified())) + << "Unexpected instruction in read barrier marking and field updating slow path: " + << instruction_->DebugName(); + DCHECK(instruction_->GetLocations()->Intrinsified()); + DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject); + + __ Bind(GetEntryLabel()); + if (unpoison_ref_before_marking_) { + // Object* ref = ref_addr->AsMirrorPtr() + __ MaybeUnpoisonHeapReference(ref_reg); + } + + // Save the old (unpoisoned) reference. + __ movl(temp_, ref_reg); + + // No need to save live registers; it's taken care of by the + // entrypoint. Also, there is no need to update the stack mask, + // as this runtime call will not trigger a garbage collection. + CodeGeneratorX86* x86_codegen = down_cast(codegen); + DCHECK_NE(ref_reg, ESP); + DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg; + // "Compact" slow path, saving two moves. 
+ // + // Instead of using the standard runtime calling convention (input + // and output in EAX): + // + // EAX <- ref + // EAX <- ReadBarrierMark(EAX) + // ref <- EAX + // + // we just use rX (the register containing `ref`) as input and output + // of a dedicated entrypoint: + // + // rX <- ReadBarrierMarkRegX(rX) + // + int32_t entry_point_offset = Thread::ReadBarrierMarkEntryPointsOffset(ref_reg); + // This runtime call does not require a stack map. + x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this); + + // If the new reference is different from the old reference, + // update the field in the holder (`*field_addr`). + // + // Note that this field could also hold a different object, if + // another thread had concurrently changed it. In that case, the + // LOCK CMPXCHGL instruction in the compare-and-set (CAS) + // operation below would abort the CAS, leaving the field as-is. + NearLabel done; + __ cmpl(temp_, ref_reg); + __ j(kEqual, &done); + + // Update the the holder's field atomically. This may fail if + // mutator updates before us, but it's OK. This is achieved + // using a strong compare-and-set (CAS) operation with relaxed + // memory synchronization ordering, where the expected value is + // the old reference and the desired value is the new reference. + // This operation is implemented with a 32-bit LOCK CMPXLCHG + // instruction, which requires the expected value (the old + // reference) to be in EAX. Save EAX beforehand, and move the + // expected value (stored in `temp_`) into EAX. + __ pushl(EAX); + __ movl(EAX, temp_); + + // Convenience aliases. + Register base = obj_; + Register expected = EAX; + Register value = ref_reg; + + bool base_equals_value = (base == value); + if (kPoisonHeapReferences) { + if (base_equals_value) { + // If `base` and `value` are the same register location, move + // `value` to a temporary register. This way, poisoning + // `value` won't invalidate `base`. 
+ value = temp_; + __ movl(value, base); + } + + // Check that the register allocator did not assign the location + // of `expected` (EAX) to `value` nor to `base`, so that heap + // poisoning (when enabled) works as intended below. + // - If `value` were equal to `expected`, both references would + // be poisoned twice, meaning they would not be poisoned at + // all, as heap poisoning uses address negation. + // - If `base` were equal to `expected`, poisoning `expected` + // would invalidate `base`. + DCHECK_NE(value, expected); + DCHECK_NE(base, expected); + + __ PoisonHeapReference(expected); + __ PoisonHeapReference(value); + } + + __ LockCmpxchgl(field_addr_, value); + + // If heap poisoning is enabled, we need to unpoison the values + // that were poisoned earlier. + if (kPoisonHeapReferences) { + if (base_equals_value) { + // `value` has been moved to a temporary register, no need + // to unpoison it. + } else { + __ UnpoisonHeapReference(value); + } + // No need to unpoison `expected` (EAX), as it is be overwritten below. + } + + // Restore EAX. + __ popl(EAX); + + __ Bind(&done); + __ jmp(GetExitLabel()); + } + + private: + // The location (register) of the marked object reference. + const Location ref_; + // The register containing the object holding the marked object reference field. + const Register obj_; + // The address of the marked reference field. The base of this address must be `obj_`. + const Address field_addr_; + + // Should the reference in `ref_` be unpoisoned prior to marking it? + const bool unpoison_ref_before_marking_; + + const Register temp_; + + DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathX86); +}; + +// Slow path generating a read barrier for a heap reference. 
+class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { + public: + ReadBarrierForHeapReferenceSlowPathX86(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) + : SlowPathCode(instruction), + out_(out), + ref_(ref), + obj_(obj), + offset_(offset), + index_(index) { + DCHECK(kEmitCompilerReadBarrier); + // If `obj` is equal to `out` or `ref`, it means the initial object + // has been overwritten by (or after) the heap object reference load + // to be instrumented, e.g.: + // + // __ movl(out, Address(out, offset)); + // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset); + // + // In that case, we have lost the information about the original + // object, and the emitted read barrier cannot work properly. + DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out; + DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref; + } + + void EmitNativeCode(CodeGenerator* codegen) override { + CodeGeneratorX86* x86_codegen = down_cast(codegen); + LocationSummary* locations = instruction_->GetLocations(); + Register reg_out = out_.AsRegister(); + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); + DCHECK(instruction_->IsInstanceFieldGet() || + instruction_->IsStaticFieldGet() || + instruction_->IsArrayGet() || + instruction_->IsInstanceOf() || + instruction_->IsCheckCast() || + (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified())) + << "Unexpected instruction in read barrier for heap reference slow path: " + << instruction_->DebugName(); + + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + // We may have to change the index's value, but as `index_` is a + // constant member (like other "inputs" of this slow path), + // introduce a copy of it, `index`. 
+ Location index = index_; + if (index_.IsValid()) { + // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics. + if (instruction_->IsArrayGet()) { + // Compute the actual memory offset and store it in `index`. + Register index_reg = index_.AsRegister(); + DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg)); + if (codegen->IsCoreCalleeSaveRegister(index_reg)) { + // We are about to change the value of `index_reg` (see the + // calls to art::x86::X86Assembler::shll and + // art::x86::X86Assembler::AddImmediate below), but it has + // not been saved by the previous call to + // art::SlowPathCode::SaveLiveRegisters, as it is a + // callee-save register -- + // art::SlowPathCode::SaveLiveRegisters does not consider + // callee-save registers, as it has been designed with the + // assumption that callee-save registers are supposed to be + // handled by the called function. So, as a callee-save + // register, `index_reg` _would_ eventually be saved onto + // the stack, but it would be too late: we would have + // changed its value earlier. Therefore, we manually save + // it here into another freely available register, + // `free_reg`, chosen of course among the caller-save + // registers (as a callee-save `free_reg` register would + // exhibit the same problem). + // + // Note we could have requested a temporary register from + // the register allocator instead; but we prefer not to, as + // this is a slow path, and we know we can find a + // caller-save register that is available. + Register free_reg = FindAvailableCallerSaveRegister(codegen); + __ movl(free_reg, index_reg); + index_reg = free_reg; + index = Location::RegisterLocation(index_reg); + } else { + // The initial register stored in `index_` has already been + // saved in the call to art::SlowPathCode::SaveLiveRegisters + // (as it is not a callee-save register), so we can freely + // use it. 
+ } + // Shifting the index value contained in `index_reg` by the scale + // factor (2) cannot overflow in practice, as the runtime is + // unable to allocate object arrays with a size larger than + // 2^26 - 1 (that is, 2^28 - 4 bytes). + __ shll(index_reg, Immediate(TIMES_4)); + static_assert( + sizeof(mirror::HeapReference) == sizeof(int32_t), + "art::mirror::HeapReference and int32_t have different sizes."); + __ AddImmediate(index_reg, Immediate(offset_)); + } else { + // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile + // intrinsics, `index_` is not shifted by a scale factor of 2 + // (as in the case of ArrayGet), as it is actually an offset + // to an object field within an object. + DCHECK(instruction_->IsInvoke()) << instruction_->DebugName(); + DCHECK(instruction_->GetLocations()->Intrinsified()); + DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) || + (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)) + << instruction_->AsInvoke()->GetIntrinsic(); + DCHECK_EQ(offset_, 0U); + DCHECK(index_.IsRegisterPair()); + // UnsafeGet's offset location is a register pair, the low + // part contains the correct offset. + index = index_.ToLow(); + } + } + + // We're moving two or three locations to locations that could + // overlap, so we need a parallel move resolver. 
+ InvokeRuntimeCallingConvention calling_convention; + HParallelMove parallel_move(codegen->GetGraph()->GetAllocator()); + parallel_move.AddMove(ref_, + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + DataType::Type::kReference, + nullptr); + parallel_move.AddMove(obj_, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + DataType::Type::kReference, + nullptr); + if (index.IsValid()) { + parallel_move.AddMove(index, + Location::RegisterLocation(calling_convention.GetRegisterAt(2)), + DataType::Type::kInt32, + nullptr); + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + } else { + codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + __ movl(calling_convention.GetRegisterAt(2), Immediate(offset_)); + } + x86_codegen->InvokeRuntime(kQuickReadBarrierSlow, instruction_, instruction_->GetDexPc(), this); + CheckEntrypointTypes< + kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>(); + x86_codegen->Move32(out_, Location::RegisterLocation(EAX)); + + RestoreLiveRegisters(codegen, locations); + __ jmp(GetExitLabel()); + } + + const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathX86"; } + + private: + Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) { + size_t ref = static_cast(ref_.AsRegister()); + size_t obj = static_cast(obj_.AsRegister()); + for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) { + if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) { + return static_cast(i); + } + } + // We shall never fail to find a free caller-save register, as + // there are more than two core caller-save registers on x86 + // (meaning it is possible to find one which is different from + // `ref` and `obj`). 
+ DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u); + LOG(FATAL) << "Could not find a free caller-save register"; + UNREACHABLE(); + } + + const Location out_; + const Location ref_; + const Location obj_; + const uint32_t offset_; + // An additional location containing an index to an array. + // Only used for HArrayGet and the UnsafeGetObject & + // UnsafeGetObjectVolatile intrinsics. + const Location index_; + + DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathX86); +}; + +// Slow path generating a read barrier for a GC root. +class ReadBarrierForRootSlowPathX86 : public SlowPathCode { + public: + ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root) + : SlowPathCode(instruction), out_(out), root_(root) { + DCHECK(kEmitCompilerReadBarrier); + } + + void EmitNativeCode(CodeGenerator* codegen) override { + LocationSummary* locations = instruction_->GetLocations(); + Register reg_out = out_.AsRegister(); + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); + DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()) + << "Unexpected instruction in read barrier for GC root slow path: " + << instruction_->DebugName(); + + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + CodeGeneratorX86* x86_codegen = down_cast(codegen); + x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_); + x86_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes*>(); + x86_codegen->Move32(out_, Location::RegisterLocation(EAX)); + + RestoreLiveRegisters(codegen, locations); + __ jmp(GetExitLabel()); + } + + const char* GetDescription() const override { return "ReadBarrierForRootSlowPathX86"; } + + private: + const Location out_; + const Location root_; + + 
DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathX86); +}; + +#undef __ +// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. +#define __ down_cast(GetAssembler())-> // NOLINT + +inline Condition X86Condition(IfCondition cond) { + switch (cond) { + case kCondEQ: return kEqual; + case kCondNE: return kNotEqual; + case kCondLT: return kLess; + case kCondLE: return kLessEqual; + case kCondGT: return kGreater; + case kCondGE: return kGreaterEqual; + case kCondB: return kBelow; + case kCondBE: return kBelowEqual; + case kCondA: return kAbove; + case kCondAE: return kAboveEqual; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +// Maps signed condition to unsigned condition and FP condition to x86 name. +inline Condition X86UnsignedOrFPCondition(IfCondition cond) { + switch (cond) { + case kCondEQ: return kEqual; + case kCondNE: return kNotEqual; + // Signed to unsigned, and FP to x86 name. + case kCondLT: return kBelow; + case kCondLE: return kBelowEqual; + case kCondGT: return kAbove; + case kCondGE: return kAboveEqual; + // Unsigned remain unchanged. 
+ case kCondB: return kBelow; + case kCondBE: return kBelowEqual; + case kCondA: return kAbove; + case kCondAE: return kAboveEqual; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const { + stream << Register(reg); +} + +void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg) const { + stream << XmmRegister(reg); +} + +const X86InstructionSetFeatures& CodeGeneratorX86::GetInstructionSetFeatures() const { + return *GetCompilerOptions().GetInstructionSetFeatures()->AsX86InstructionSetFeatures(); +} + +size_t CodeGeneratorX86::SaveCoreRegister(size_t stack_index, uint32_t reg_id) { + __ movl(Address(ESP, stack_index), static_cast(reg_id)); + return kX86WordSize; +} + +size_t CodeGeneratorX86::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) { + __ movl(static_cast(reg_id), Address(ESP, stack_index)); + return kX86WordSize; +} + +size_t CodeGeneratorX86::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) { + if (GetGraph()->HasSIMD()) { + __ movups(Address(ESP, stack_index), XmmRegister(reg_id)); + } else { + __ movsd(Address(ESP, stack_index), XmmRegister(reg_id)); + } + return GetSlowPathFPWidth(); +} + +size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) { + if (GetGraph()->HasSIMD()) { + __ movups(XmmRegister(reg_id), Address(ESP, stack_index)); + } else { + __ movsd(XmmRegister(reg_id), Address(ESP, stack_index)); + } + return GetSlowPathFPWidth(); +} + +void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path) { + ValidateInvokeRuntime(entrypoint, instruction, slow_path); + GenerateInvokeRuntime(GetThreadOffset(entrypoint).Int32Value()); + if (EntrypointRequiresStackMap(entrypoint)) { + RecordPcInfo(instruction, dex_pc, slow_path); + } +} + +void CodeGeneratorX86::InvokeRuntimeWithoutRecordingPcInfo(int32_t 
entry_point_offset, + HInstruction* instruction, + SlowPathCode* slow_path) { + ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path); + GenerateInvokeRuntime(entry_point_offset); +} + +void CodeGeneratorX86::GenerateInvokeRuntime(int32_t entry_point_offset) { + __ fs()->call(Address::Absolute(entry_point_offset)); +} + +CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, + const CompilerOptions& compiler_options, + OptimizingCompilerStats* stats) + : CodeGenerator(graph, + kNumberOfCpuRegisters, + kNumberOfXmmRegisters, + kNumberOfRegisterPairs, + ComputeRegisterMask(reinterpret_cast(kCoreCalleeSaves), + arraysize(kCoreCalleeSaves)) + | (1 << kFakeReturnRegister), + 0, + compiler_options, + stats), + block_labels_(nullptr), + location_builder_(graph, this), + instruction_visitor_(graph, this), + move_resolver_(graph->GetAllocator(), this), + assembler_(graph->GetAllocator()), + boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + constant_area_start_(-1), + fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)), + method_address_offset_(std::less(), + graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) { + // Use a fake return address register to mimic Quick. 
+ AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister)); +} + +void CodeGeneratorX86::SetupBlockedRegisters() const { + // Stack register is always reserved. + blocked_core_registers_[ESP] = true; +} + +InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen) + : InstructionCodeGenerator(graph, codegen), + assembler_(codegen->GetAssembler()), + codegen_(codegen) {} + +static dwarf::Reg DWARFReg(Register reg) { + return dwarf::Reg::X86Core(static_cast(reg)); +} + +void CodeGeneratorX86::MaybeIncrementHotness(bool is_frame_entry) { + if (GetCompilerOptions().CountHotnessInCompiledCode()) { + Register reg = EAX; + if (is_frame_entry) { + reg = kMethodRegisterArgument; + } else { + __ pushl(EAX); + __ movl(EAX, Address(ESP, kX86WordSize)); + } + NearLabel overflow; + __ cmpw(Address(reg, ArtMethod::HotnessCountOffset().Int32Value()), + Immediate(ArtMethod::MaxCounter())); + __ j(kEqual, &overflow); + __ addw(Address(reg, ArtMethod::HotnessCountOffset().Int32Value()), + Immediate(1)); + __ Bind(&overflow); + if (!is_frame_entry) { + __ popl(EAX); + } + } + + if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) { + ScopedObjectAccess soa(Thread::Current()); + ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize); + if (info != nullptr) { + uint32_t address = reinterpret_cast32(info); + NearLabel done; + if (HasEmptyFrame()) { + CHECK(is_frame_entry); + // Alignment + __ subl(ESP, Immediate(8)); + __ cfi().AdjustCFAOffset(8); + // We need a temporary. The stub also expects the method at bottom of stack. 
+ __ pushl(EAX); + __ cfi().AdjustCFAOffset(4); + __ movl(EAX, Immediate(address)); + __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()), + Immediate(1)); + __ j(kCarryClear, &done); + GenerateInvokeRuntime( + GetThreadOffset(kQuickCompileOptimized).Int32Value()); + __ Bind(&done); + // We don't strictly require to restore EAX, but this makes the generated + // code easier to reason about. + __ popl(EAX); + __ cfi().AdjustCFAOffset(-4); + __ addl(ESP, Immediate(8)); + __ cfi().AdjustCFAOffset(-8); + } else { + if (!RequiresCurrentMethod()) { + CHECK(is_frame_entry); + __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument); + } + // We need a temporary. + __ pushl(EAX); + __ cfi().AdjustCFAOffset(4); + __ movl(EAX, Immediate(address)); + __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()), + Immediate(1)); + __ popl(EAX); // Put stack as expected before exiting or calling stub. + __ cfi().AdjustCFAOffset(-4); + __ j(kCarryClear, &done); + GenerateInvokeRuntime( + GetThreadOffset(kQuickCompileOptimized).Int32Value()); + __ Bind(&done); + } + } + } +} + +void CodeGeneratorX86::GenerateFrameEntry() { + __ cfi().SetCurrentCFAOffset(kX86WordSize); // return address + __ Bind(&frame_entry_label_); + bool skip_overflow_check = + IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86); + DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); + + if (!skip_overflow_check) { + size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86); + __ testl(EAX, Address(ESP, -static_cast(reserved_bytes))); + RecordPcInfo(nullptr, 0); + } + + if (!HasEmptyFrame()) { + for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) { + Register reg = kCoreCalleeSaves[i]; + if (allocated_registers_.ContainsCoreRegister(reg)) { + __ pushl(reg); + __ cfi().AdjustCFAOffset(kX86WordSize); + __ cfi().RelOffset(DWARFReg(reg), 0); + } + } + + int adjust = GetFrameSize() - 
FrameEntrySpillSize(); + __ subl(ESP, Immediate(adjust)); + __ cfi().AdjustCFAOffset(adjust); + // Save the current method if we need it. Note that we do not + // do this in HCurrentMethod, as the instruction might have been removed + // in the SSA graph. + if (RequiresCurrentMethod()) { + __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument); + } + + if (GetGraph()->HasShouldDeoptimizeFlag()) { + // Initialize should_deoptimize flag to 0. + __ movl(Address(ESP, GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0)); + } + } + + MaybeIncrementHotness(/* is_frame_entry= */ true); +} + +void CodeGeneratorX86::GenerateFrameExit() { + __ cfi().RememberState(); + if (!HasEmptyFrame()) { + int adjust = GetFrameSize() - FrameEntrySpillSize(); + __ addl(ESP, Immediate(adjust)); + __ cfi().AdjustCFAOffset(-adjust); + + for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) { + Register reg = kCoreCalleeSaves[i]; + if (allocated_registers_.ContainsCoreRegister(reg)) { + __ popl(reg); + __ cfi().AdjustCFAOffset(-static_cast(kX86WordSize)); + __ cfi().Restore(DWARFReg(reg)); + } + } + } + __ ret(); + __ cfi().RestoreState(); + __ cfi().DefCFAOffset(GetFrameSize()); +} + +void CodeGeneratorX86::Bind(HBasicBlock* block) { + __ Bind(GetLabelOf(block)); +} + +Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type type) const { + switch (type) { + case DataType::Type::kReference: + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kUint32: + case DataType::Type::kInt32: + return Location::RegisterLocation(EAX); + + case DataType::Type::kUint64: + case DataType::Type::kInt64: + return Location::RegisterPairLocation(EAX, EDX); + + case DataType::Type::kVoid: + return Location::NoLocation(); + + case DataType::Type::kFloat64: + case DataType::Type::kFloat32: + return Location::FpuRegisterLocation(XMM0); + } + + 
UNREACHABLE(); +} + +Location InvokeDexCallingConventionVisitorX86::GetMethodLocation() const { + return Location::RegisterLocation(kMethodRegisterArgument); +} + +Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type type) { + switch (type) { + case DataType::Type::kReference: + case DataType::Type::kBool: + case DataType::Type::kUint8: + case DataType::Type::kInt8: + case DataType::Type::kUint16: + case DataType::Type::kInt16: + case DataType::Type::kInt32: { + uint32_t index = gp_index_++; + stack_index_++; + if (index < calling_convention.GetNumberOfRegisters()) { + return Location::RegisterLocation(calling_convention.GetRegisterAt(index)); + } else { + return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1)); + } + } + + case DataType::Type::kInt64: { + uint32_t index = gp_index_; + gp_index_ += 2; + stack_index_ += 2; + if (index + 1 < calling_convention.GetNumberOfRegisters()) { + X86ManagedRegister pair = X86ManagedRegister::FromRegisterPair( + calling_convention.GetRegisterPairAt(index)); + return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh()); + } else { + return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2)); + } + } + + case DataType::Type::kFloat32: { + uint32_t index = float_index_++; + stack_index_++; + if (index < calling_convention.GetNumberOfFpuRegisters()) { + return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index)); + } else { + return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1)); + } + } + + case DataType::Type::kFloat64: { + uint32_t index = float_index_++; + stack_index_ += 2; + if (index < calling_convention.GetNumberOfFpuRegisters()) { + return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index)); + } else { + return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2)); + } + } + + case DataType::Type::kUint32: + case 
DataType::Type::kUint64:
    case DataType::Type::kVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      UNREACHABLE();
  }
  return Location::NoLocation();
}

// Moves a 32-bit value between any two locations: GP register, XMM register,
// stack slot, or (for stack destinations) a constant source. No-op when the
// source and destination are the same location.
void CodeGeneratorX86::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ movl(destination.AsRegister(), source.AsRegister());
    } else if (source.IsFpuRegister()) {
      __ movd(destination.AsRegister(), source.AsFpuRegister());
    } else {
      DCHECK(source.IsStackSlot());
      __ movl(destination.AsRegister(), Address(ESP, source.GetStackIndex()));
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ movd(destination.AsFpuRegister(), source.AsRegister());
    } else if (source.IsFpuRegister()) {
      __ movaps(destination.AsFpuRegister(), source.AsFpuRegister());
    } else {
      DCHECK(source.IsStackSlot());
      __ movss(destination.AsFpuRegister(), Address(ESP, source.GetStackIndex()));
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegister());
    } else if (source.IsFpuRegister()) {
      __ movss(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister());
    } else if (source.IsConstant()) {
      HConstant* constant = source.GetConstant();
      int32_t value = GetInt32ValueOf(constant);
      __ movl(Address(ESP, destination.GetStackIndex()), Immediate(value));
    } else {
      DCHECK(source.IsStackSlot());
      // Stack-to-stack move via push/pop: avoids needing a scratch register.
      __ pushl(Address(ESP, source.GetStackIndex()));
      __ popl(Address(ESP, destination.GetStackIndex()));
    }
  }
}

// Moves a 64-bit value between locations, using EAX:EDX-style register
// pairs, XMM registers, or double stack slots as appropriate.
void CodeGeneratorX86::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // Pair-to-pair may alias; resolve both halves as a parallel move.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh()),
          Location::RegisterLocation(destination.AsRegisterPairHigh()),

DataType::Type::kInt32,
          Location::RegisterLocation(source.AsRegisterPairLow()),
          Location::RegisterLocation(destination.AsRegisterPairLow()),
          DataType::Type::kInt32);
    } else if (source.IsFpuRegister()) {
      // Copy the low 32 bits, shift the high half down, then copy it too.
      XmmRegister src_reg = source.AsFpuRegister();
      __ movd(destination.AsRegisterPairLow(), src_reg);
      __ psrlq(src_reg, Immediate(32));
      __ movd(destination.AsRegisterPairHigh(), src_reg);
    } else {
      // No conflict possible, so just do the moves.
      DCHECK(source.IsDoubleStackSlot());
      __ movl(destination.AsRegisterPairLow(), Address(ESP, source.GetStackIndex()));
      __ movl(destination.AsRegisterPairHigh(),
              Address(ESP, source.GetHighStackIndex(kX86WordSize)));
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsFpuRegister()) {
      __ movaps(destination.AsFpuRegister(), source.AsFpuRegister());
    } else if (source.IsDoubleStackSlot()) {
      __ movsd(destination.AsFpuRegister(), Address(ESP, source.GetStackIndex()));
    } else if (source.IsRegisterPair()) {
      // Pair -> XMM: spill the pair to a temporary stack area and load it
      // back as one 64-bit value.
      size_t elem_size = DataType::Size(DataType::Type::kInt32);
      // Create stack space for 2 elements.
      __ subl(ESP, Immediate(2 * elem_size));
      __ movl(Address(ESP, 0), source.AsRegisterPairLow());
      __ movl(Address(ESP, elem_size), source.AsRegisterPairHigh());
      __ movsd(destination.AsFpuRegister(), Address(ESP, 0));
      // And remove the temporary stack space we allocated.
      __ addl(ESP, Immediate(2 * elem_size));
    } else {
      LOG(FATAL) << "Unimplemented";
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot()) << destination;
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow());
      __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
              source.AsRegisterPairHigh());
    } else if (source.IsFpuRegister()) {
      __ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister());
    } else if (source.IsConstant()) {
      HConstant* constant = source.GetConstant();
      DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant());
      int64_t value = GetInt64ValueOf(constant);
      // Store the constant as two 32-bit immediates.
      __ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value)));
      __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
              Immediate(High32Bits(value)));
    } else {
      DCHECK(source.IsDoubleStackSlot()) << source;
      // Stack-to-stack: let the parallel move resolver handle scratch needs.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          DataType::Type::kInt32,
          Location::StackSlot(source.GetHighStackIndex(kX86WordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kX86WordSize)),
          DataType::Type::kInt32);
    }
  }
}

// Loads a 32-bit immediate into a register location.
void CodeGeneratorX86::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ movl(location.AsRegister(), Immediate(value));
}

// Moves between arbitrary locations via the parallel move resolver. 64-bit
// non-constant, non-FP moves are split into two independent 32-bit halves.
void CodeGeneratorX86::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
  HParallelMove move(GetGraph()->GetAllocator());
  if (dst_type == DataType::Type::kInt64 && !src.IsConstant() && !src.IsFpuRegister()) {
    move.AddMove(src.ToLow(), dst.ToLow(), DataType::Type::kInt32, nullptr);
    move.AddMove(src.ToHigh(), dst.ToHigh(), DataType::Type::kInt32, nullptr);
  } else {
    move.AddMove(src, dst, dst_type, nullptr);
  }
  GetMoveResolver()->EmitNativeCode(&move);
}

// Records `location`'s register (or both registers of a pair) as temps in
// `locations`; other location kinds are not supported.
void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else if (location.IsRegisterPair()) {
    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow()));

locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh()));
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}

// Shared lowering for HGoto and HTryBoundary. Emits the loop suspend check
// on back edges (also bumping the hotness counter) and omits the jump when
// the successor is the next block in emission order.
void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  if (successor->IsExitBlock()) {
    DCHECK(got->GetPrevious()->AlwaysThrows());
    return;  // no code needed
  }

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ jmp(codegen_->GetLabelOf(successor));
  }
}

// HGoto has no operands and produces no value.
void LocationsBuilderX86::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorX86::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

// HTryBoundary has no operands and produces no value.
void LocationsBuilderX86::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorX86::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}

// HExit generates no code.
void LocationsBuilderX86::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

// Emits the jumps for a floating-point condition whose flags were set by a
// ucomiss/ucomisd compare: the unordered (NaN) case is routed to the true or
// false label according to the condition's NaN semantics, then the ordinary
// unsigned/FP condition jump is emitted.
// NOTE(review): the template parameter list (likely <class LabelType>)
// appears stripped in this import; verify against upstream ART sources.
template
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
                                                  LabelType* true_label,
                                                  LabelType* false_label) {
  if (cond->IsFPConditionTrueIfNaN()) {
    __ j(kUnordered, true_label);
  } else if (cond->IsFPConditionFalseIfNaN()) {
    __
j(kUnordered, false_label);
  }
  __ j(X86UnsignedOrFPCondition(cond->GetCondition()), true_label);
}

// Emits the compare-and-branch sequence for a 64-bit integer condition on
// x86 (no 64-bit compare instruction): compares the high words first, then,
// when the high words are equal, the low words with an unsigned condition,
// since only the high word carries the sign.
// NOTE(review): the template parameter list appears stripped in this import;
// verify against upstream ART sources.
template
void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
                                                               LabelType* true_label,
                                                               LabelType* false_label) {
  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  IfCondition if_cond = cond->GetCondition();

  Register left_high = left.AsRegisterPairHigh();
  Register left_low = left.AsRegisterPairLow();
  IfCondition true_high_cond = if_cond;
  IfCondition false_high_cond = cond->GetOppositeCondition();
  Condition final_condition = X86UnsignedOrFPCondition(if_cond);  // unsigned on lower part

  // Set the conditions for the test, remembering that == needs to be
  // decided using the low words.
  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      // Nothing to do.
      break;
    case kCondLT:
      false_high_cond = kCondGT;
      break;
    case kCondLE:
      true_high_cond = kCondLT;
      break;
    case kCondGT:
      false_high_cond = kCondLT;
      break;
    case kCondGE:
      true_high_cond = kCondGT;
      break;
    case kCondB:
      false_high_cond = kCondA;
      break;
    case kCondBE:
      true_high_cond = kCondB;
      break;
    case kCondA:
      false_high_cond = kCondB;
      break;
    case kCondAE:
      true_high_cond = kCondA;
      break;
  }

  if (right.IsConstant()) {
    // Right operand is a long constant: compare against its two halves.
    int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
    int32_t val_high = High32Bits(value);
    int32_t val_low = Low32Bits(value);

    codegen_->Compare32BitValue(left_high, val_high);
    if (if_cond == kCondNE) {
      __ j(X86Condition(true_high_cond), true_label);
    } else if (if_cond == kCondEQ) {
      __ j(X86Condition(false_high_cond), false_label);
    } else {
      __ j(X86Condition(true_high_cond), true_label);
      __ j(X86Condition(false_high_cond), false_label);
    }
    // Must be equal high, so compare the lows.
    codegen_->Compare32BitValue(left_low, val_low);
  } else if (right.IsRegisterPair()) {
    Register right_high = right.AsRegisterPairHigh();
    Register right_low = right.AsRegisterPairLow();

    __ cmpl(left_high, right_high);
    if (if_cond == kCondNE) {
      __ j(X86Condition(true_high_cond), true_label);
    } else if (if_cond == kCondEQ) {
      __ j(X86Condition(false_high_cond), false_label);
    } else {
      __ j(X86Condition(true_high_cond), true_label);
      __ j(X86Condition(false_high_cond), false_label);
    }
    // Must be equal high, so compare the lows.
    __ cmpl(left_low, right_low);
  } else {
    DCHECK(right.IsDoubleStackSlot());
    __ cmpl(left_high, Address(ESP, right.GetHighStackIndex(kX86WordSize)));
    if (if_cond == kCondNE) {
      __ j(X86Condition(true_high_cond), true_label);
    } else if (if_cond == kCondEQ) {
      __ j(X86Condition(false_high_cond), false_label);
    } else {
      __ j(X86Condition(true_high_cond), true_label);
      __ j(X86Condition(false_high_cond), false_label);
    }
    // Must be equal high, so compare the lows.
    __ cmpl(left_low, Address(ESP, right.GetStackIndex()));
  }
  // The last comparison might be unsigned.
  __ j(final_condition, true_label);
}

// Emits a float/double comparison (ucomiss/ucomisd) between lhs and rhs.
// The rhs may be an XMM register, a constant-table literal emitted at its
// use site, or a stack slot.
void InstructionCodeGeneratorX86::GenerateFPCompare(Location lhs,
                                                    Location rhs,
                                                    HInstruction* insn,
                                                    bool is_double) {
  HX86LoadFromConstantTable* const_area = insn->InputAt(1)->AsX86LoadFromConstantTable();
  if (is_double) {
    if (rhs.IsFpuRegister()) {
      __ ucomisd(lhs.AsFpuRegister(), rhs.AsFpuRegister());
    } else if (const_area != nullptr) {
      DCHECK(const_area->IsEmittedAtUseSite());
      __ ucomisd(lhs.AsFpuRegister(),
                 codegen_->LiteralDoubleAddress(
                     const_area->GetConstant()->AsDoubleConstant()->GetValue(),
                     const_area->GetBaseMethodAddress(),
                     const_area->GetLocations()->InAt(0).AsRegister()));
    } else {
      DCHECK(rhs.IsDoubleStackSlot());
      __ ucomisd(lhs.AsFpuRegister(), Address(ESP, rhs.GetStackIndex()));
    }
  } else {
    if (rhs.IsFpuRegister()) {
      __ ucomiss(lhs.AsFpuRegister(), rhs.AsFpuRegister());
    } else if (const_area != nullptr) {
      DCHECK(const_area->IsEmittedAtUseSite());
      __ ucomiss(lhs.AsFpuRegister(),
                 codegen_->LiteralFloatAddress(
                     const_area->GetConstant()->AsFloatConstant()->GetValue(),
                     const_area->GetBaseMethodAddress(),
                     const_area->GetLocations()->InAt(0).AsRegister()));
    } else {
      DCHECK(rhs.IsStackSlot());
      __ ucomiss(lhs.AsFpuRegister(), Address(ESP, rhs.GetStackIndex()));
    }
  }
}

// Emits the compare and the branches for a long or floating-point condition
// that was not materialized into a boolean.
// NOTE(review): the template parameter list appears stripped in this import;
// verify against upstream ART sources.
template
void InstructionCodeGeneratorX86::GenerateCompareTestAndBranch(HCondition* condition,
                                                               LabelType* true_target_in,
                                                               LabelType* false_target_in) {
  // Generated branching requires both targets to be explicit. If either of the
  // targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
  LabelType fallthrough_target;
  LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
  LabelType* false_target = false_target_in == nullptr ?
&fallthrough_target : false_target_in;

  LocationSummary* locations = condition->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  // Dispatch on the operand type of the comparison.
  DataType::Type type = condition->InputAt(0)->GetType();
  switch (type) {
    case DataType::Type::kInt64:
      GenerateLongComparesAndJumps(condition, true_target, false_target);
      break;
    case DataType::Type::kFloat32:
      GenerateFPCompare(left, right, condition, false);
      GenerateFPJumps(condition, true_target, false_target);
      break;
    case DataType::Type::kFloat64:
      GenerateFPCompare(left, right, condition, true);
      GenerateFPJumps(condition, true_target, false_target);
      break;
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }

  if (false_target != &fallthrough_target) {
    __ jmp(false_target);
  }

  if (fallthrough_target.IsLinked()) {
    __ Bind(&fallthrough_target);
  }
}

// Returns true when `branch` can reuse the EFLAGS set by `cond`.
static bool AreEflagsSetFrom(HInstruction* cond, HInstruction* branch) {
  // Moves may affect the eflags register (move zero uses xorl), so the EFLAGS
  // are set only strictly before `branch`. We can't use the eflags on long/FP
  // conditions if they are materialized due to the complex branching.
  return cond->IsCondition() &&
         cond->GetNext() == branch &&
         cond->InputAt(0)->GetType() != DataType::Type::kInt64 &&
         !DataType::IsFloatingPointType(cond->InputAt(0)->GetType());
}

// Emits a conditional branch for `instruction`, whose condition is the input
// at `condition_input_index`. Either target may be null, meaning that
// successor is the fallthrough block.
// NOTE(review): the template parameter list appears stripped in this import;
// verify against upstream ART sources.
template
void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instruction,
                                                        size_t condition_input_index,
                                                        LabelType* true_target,
                                                        LabelType* false_target) {
  HInstruction* cond = instruction->InputAt(condition_input_index);

  if (true_target == nullptr && false_target == nullptr) {
    // Nothing to do. The code always falls through.
    return;
  } else if (cond->IsIntConstant()) {
    // Constant condition, statically compared against "true" (integer value 1).
    if (cond->AsIntConstant()->IsTrue()) {
      if (true_target != nullptr) {
        __ jmp(true_target);
      }
    } else {
      DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
      if (false_target != nullptr) {
        __ jmp(false_target);
      }
    }
    return;
  }

  // The following code generates these patterns:
  //  (1) true_target == nullptr && false_target != nullptr
  //        - opposite condition true => branch to false_target
  //  (2) true_target != nullptr && false_target == nullptr
  //        - condition true => branch to true_target
  //  (3) true_target != nullptr && false_target != nullptr
  //        - condition true => branch to true_target
  //        - branch to false_target
  if (IsBooleanValueOrMaterializedCondition(cond)) {
    if (AreEflagsSetFrom(cond, instruction)) {
      // Reuse the flags set by the immediately preceding condition.
      if (true_target == nullptr) {
        __ j(X86Condition(cond->AsCondition()->GetOppositeCondition()), false_target);
      } else {
        __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
      }
    } else {
      // Materialized condition, compare against 0.
      Location lhs = instruction->GetLocations()->InAt(condition_input_index);
      if (lhs.IsRegister()) {
        __ testl(lhs.AsRegister(), lhs.AsRegister());
      } else {
        __ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
      }
      if (true_target == nullptr) {
        __ j(kEqual, false_target);
      } else {
        __ j(kNotEqual, true_target);
      }
    }
  } else {
    // Condition has not been materialized, use its inputs as the comparison and
    // its condition as the branch condition.
    HCondition* condition = cond->AsCondition();

    // If this is a long or FP comparison that has been folded into
    // the HCondition, generate the comparison directly.
    DataType::Type type = condition->InputAt(0)->GetType();
    if (type == DataType::Type::kInt64 || DataType::IsFloatingPointType(type)) {
      GenerateCompareTestAndBranch(condition, true_target, false_target);
      return;
    }

    Location lhs = condition->GetLocations()->InAt(0);
    Location rhs = condition->GetLocations()->InAt(1);
    // LHS is guaranteed to be in a register (see LocationsBuilderX86::HandleCondition).
    codegen_->GenerateIntCompare(lhs, rhs);
    if (true_target == nullptr) {
      __ j(X86Condition(condition->GetOppositeCondition()), false_target);
    } else {
      __ j(X86Condition(condition->GetCondition()), true_target);
    }
  }

  // If neither branch falls through (case 3), the conditional branch to `true_target`
  // was already emitted (case 2) and we need to emit a jump to `false_target`.
  if (true_target != nullptr && false_target != nullptr) {
    __ jmp(false_target);
  }
}

// HIf only needs an input location when its condition is a boolean value or
// a materialized condition; folded comparisons carry their own operands.
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
  if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
    locations->SetInAt(0, Location::Any());
  }
}

void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
  HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
  HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
  // A null target means that successor is the next block (fallthrough).
  Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
      nullptr : codegen_->GetLabelOf(true_successor);
  Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}

// HDeoptimize runs on a slow path; only the first runtime calling-convention
// register is registered as a custom caller-save for that path. As with HIf,
// an input location is only needed for a boolean/materialized condition.
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetAllocator())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  InvokeRuntimeCallingConvention calling_convention;
  RegisterSet caller_saves = RegisterSet::Empty();
  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetCustomSlowPathCallerSaves(caller_saves);
  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
    locations->SetInAt(0, Location::Any());
  }
}

void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
  // Allocate the slow path, then branch to it when the condition holds
  // (continuation of this call lies outside this chunk).
  SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath(deoptimize);
  GenerateTestAndBranch